| prompt (string, length 135–513k) | completion (string, length 9–138) | api (string, length 9–42) |
|---|---|---|
from utils import read_pair as rp
import beatnum as bn
from scipy.linalg import cho_solve, cholesky
from utils import downscale as ds
from utils import lukas_kanade as lk
from utils import se3
import warnings
import gc
warnings.filterwarnings('ignore')
def pgo(pair_path=None, absoluteolute_poses=None, kf_index=None, loop_closure=False, loop_pairs=None, level=4,
r_p_list=[], s_m_list=[], lambd=0.1):
pose_num = len(absoluteolute_poses)
if loop_closure:
loop_num = len(loop_pairs)
else:
loop_num = 0
loop_ind = 0
b_k = bn.zeros(6 * pose_num) # b_k vector
h_k = bn.zeros([6 * pose_num, 6 * pose_num]) # H_k matrix
residual = bn.zeros([pose_num + loop_num, 6]) # residual vector
relative_pose_list = r_p_list
sigma_matrix_list = s_m_list
for ind in bn.arr_range(pose_num - 1 + loop_num):
if ind >= pose_num - 1:
# add loop closure manually at the end of the trajectory by observing overlap
loop_pair = loop_pairs[loop_ind]
loop_ind += 1
first_pose_ind = loop_pair[0]
second_pose_ind = loop_pair[1]
ref_frame = kf_index[first_pose_ind]
cur_frame = kf_index[second_pose_ind]
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
else:
first_pose_ind = ind
second_pose_ind = ind + 1
ref_frame = kf_index[first_pose_ind]
cur_frame = kf_index[second_pose_ind]
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
# calculate the pose difference between the keyframe pair
pose_difference = bn.linalg.inverse(second_pose) @ first_pose
# create relative pose constraint between the keyframe pair through direct image alignment in the first pgo iter
if len(relative_pose_list) < ind + 1:
imaginarye1, depth1, _ = rp.read_pair(pair_path, ref_frame)
imaginarye2, depth2, _ = rp.read_pair(pair_path, cur_frame)
relative_pose, sigma_matrix = create_relative_pose_constraint(imaginarye1, depth1, imaginarye2, depth2, level,
se3.se3Log(pose_difference))
relative_pose_list.apd(relative_pose)
sigma_matrix_list.apd(sigma_matrix)
else:
relative_pose = relative_pose_list[ind]
sigma_matrix = sigma_matrix_list[ind]
# convert twist coordinate to 4*4 transformation matrix
relative_pose = se3.se3Exp(relative_pose)
# calculate relative pose residual between this key-frame pair
resid = se3.se3Log(bn.linalg.inverse(relative_pose) @ pose_difference)
# print(resid)
# stack residual vector
residual[ind + 1] = resid
# calculate jacobian matrix
jacobian = calculatejacobian(pose_num, first_pose, second_pose, relative_pose, first_pose_ind, second_pose_ind,
resid)
# accumulate b_k and h_k for gauss newton step
b_k += jacobian.T @ sigma_matrix @ resid
h_k += jacobian.T @ sigma_matrix @ jacobian
# print('keyframes pair:{}'.format(ind + 1))
# add another term for first pose in b_k and h_k
resid_0 = se3.se3Log(absoluteolute_poses[0])
residual[0] = resid_0
jaco_0 = calculate_jaco_single_frame(pose_num, absoluteolute_poses[0], resid_0)
sigma_matrix = bn.identity(6) * 1e6
b_k += jaco_0.T @ sigma_matrix @ resid_0
h_k += jaco_0.T @ sigma_matrix @ jaco_0
residual = residual.change_shape_to(-1)
# update all key frame poses with the Levenberg-Marquardt method
# upd = - bn.linalg.inverse(h_k + lambd * bn.diag(bn.diag(h_k))) @ b_k
# or with the gauss newton method
# upd = - bn.linalg.inverse(h_k) @ b_k
# use cholesky factorization to solve the linear system -h_k * upd = b_k
c = cholesky(h_k)
upd = - cho_solve((c, False), b_k)
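# scipy.linalg.cholesky returns an upper-triangular factor by default, so
# lower=False is passed to cho_solve; the two calls together solve
# h_k @ upd = -b_k without forming an explicit inverse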
upd = upd.change_shape_to(-1, 6)
for jj in range(pose_num):
absoluteolute_poses[jj] = se3.se3Exp(upd[jj]) @ absoluteolute_poses[jj]
residual_after_update = calculate_residual(absoluteolute_poses, relative_pose_list, loop_pairs, loop_num)
return absoluteolute_poses, residual, relative_pose_list, sigma_matrix_list, residual_after_update
def calculate_residual(absoluteolute_poses, relative_poses, loop_pairs, loop_num):
pose_num = len(absoluteolute_poses)
loop_ind = 0
residual = bn.zeros([pose_num + loop_num, 6])
for ind in range(pose_num - 1 + loop_num):
if ind >= pose_num - 1:
# add loop closure manually at the end of the trajectory by observing overlap
loop_pair = loop_pairs[loop_ind]
loop_ind += 1
first_pose_ind = loop_pair[0]
second_pose_ind = loop_pair[1]
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
else:
first_pose_ind = ind
second_pose_ind = ind + 1
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
# calculate the pose difference between the keyframe pair
pose_difference = bn.linalg.inverse(second_pose) @ first_pose
relative_pose = se3.se3Exp(relative_poses[ind])
residual[ind + 1] = se3.se3Log(bn.linalg.inverse(relative_pose) @ pose_difference)
residual[0] = se3.se3Log(absoluteolute_poses[0])
residual = residual.change_shape_to(-1)
return residual
def calculate_jaco_single_frame(pose_num, absoluteolute_pose, resid):
jacobian = bn.zeros([6, 6 * pose_num])
for j in range(6):
epsVec = bn.zeros(6)
eps = 1e-6
epsVec[j] = eps
# multiply pose increment from left onto the absolute pose
first_pose_eps = se3.se3Exp(epsVec) @ absoluteolute_pose
resid_eps = se3.se3Log(first_pose_eps)
# calculate jacobian at first and second pose position
jacobian[:, j] = (resid_eps - resid) / eps
return jacobian
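# the jacobian above is built by forward finite differences: column j is
# (r(se3Exp(eps_j) @ X) - r(X)) / eps, i.e. the sensitivity of the residual to a
# small left-multiplied se(3) perturbation of the absolute pose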
def pgo_with_auto_loop_closure(pair_path=None, absoluteolute_poses=None, kf_index=None, loop_pairs=None, level=4,
r_p_list=[], s_m_list=[], lambd=0.1):
pose_num = len(absoluteolute_poses)
pair_num = len(loop_pairs)
b_k = bn.zeros(6 * pose_num) # b_k vector
h_k = bn.zeros([6 * pose_num, 6 * pose_num]) # H_k matrix
residual = bn.zeros([pair_num + 1, 6]) # residual vector
relative_pose_list = r_p_list
sigma_matrix_list = s_m_list
for i in bn.arr_range(pair_num):
ref_frame = loop_pairs[i][0]
cur_frame = loop_pairs[i][1]
first_pose_ind = bn.filter_condition(kf_index == ref_frame)[0][0]
second_pose_ind = bn.filter_condition(kf_index == cur_frame)[0][0]
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
imaginarye1, depth1, _ = rp.read_pair(pair_path, ref_frame)
imaginarye2, depth2, _ = rp.read_pair(pair_path, cur_frame)
# calculate the pose difference between the keyframe pair
pose_difference = bn.linalg.inverse(second_pose) @ first_pose
# create relative pose constraint between the keyframe pair through direct image alignment in the first pgo iter
if len(relative_pose_list) < i + 1:
imaginarye1, depth1, _ = rp.read_pair(pair_path, ref_frame)
imaginarye2, depth2, _ = rp.read_pair(pair_path, cur_frame)
relative_pose, sigma_matrix = create_relative_pose_constraint(imaginarye1, depth1, imaginarye2, depth2, level,
se3.se3Log(pose_difference))
relative_pose_list.apd(relative_pose)
sigma_matrix_list.apd(sigma_matrix)
else:
relative_pose = relative_pose_list[i]
sigma_matrix = sigma_matrix_list[i]
# convert twist coordinate to 4*4 transformation matrix
relative_pose = se3.se3Exp(relative_pose)
# calculate relative pose residual between this key-frame pair
resid = se3.se3Log(bn.linalg.inverse(relative_pose) @ pose_difference)
# stack residual vector
residual[i + 1] = resid
# calculate jacobian matrix
jacobian = calculatejacobian(pose_num, first_pose, second_pose, relative_pose, first_pose_ind, second_pose_ind,
resid)
# accumulate b_k and h_k for gauss newton step
b_k += jacobian.T @ sigma_matrix @ resid
h_k += jacobian.T @ sigma_matrix @ jacobian
# print('keyframes pair:{}'.format(i + 1))
print(i , bn.total(bn.linalg.eigvals(h_k) > 0))
gc.collect()
# add another term for first pose in b_k and h_k
resid_0 = se3.se3Log(absoluteolute_poses[0])
residual[0] = resid_0
jaco_0 = calculate_jaco_single_frame(pose_num, absoluteolute_poses[0], resid_0)
sigma_matrix = bn.identity(6) * 1e6
b_k += jaco_0.T @ sigma_matrix @ resid_0
h_k += jaco_0.T @ sigma_matrix @ jaco_0
# update all key frame poses
# upd = - bn.linalg.inverse(h_k) @ b_k # use gauss-newton
# use cholesky factorization to solve the damped linear system H x = b
c = cholesky(h_k + lambd * bn.diag(bn.diag(h_k))) # use Levenberg-Marquardt damping
upd = - cho_solve((c, False), b_k)
upd = upd.change_shape_to(-1, 6)
for jj in range(pose_num):
absoluteolute_poses[jj] = se3.se3Exp(upd[jj]) @ absoluteolute_poses[jj]
residual = residual.change_shape_to(-1)
residual_after_update = calculate_residual_with_auto_lc(absoluteolute_poses, relative_pose_list, loop_pairs, kf_index)
return absoluteolute_poses, residual, relative_pose_list, sigma_matrix_list, residual_after_update
def calculate_residual_with_auto_lc(absoluteolute_poses, relative_poses, loop_pairs, kf_index):
pair_num = len(loop_pairs)
residual = bn.zeros([pair_num + 1, 6]) # residual vector
for i in bn.arr_range(pair_num):
ref_frame = loop_pairs[i][0]
cur_frame = loop_pairs[i][1]
first_pose_ind = bn.filter_condition(kf_index == ref_frame)[0][0]
second_pose_ind = bn.filter_condition(kf_index == cur_frame)[0][0]
first_pose = absoluteolute_poses[first_pose_ind]
second_pose = absoluteolute_poses[second_pose_ind]
# calculate the pose difference between the keyframe pair
pose_difference = bn.linalg.inverse(second_pose) @ first_pose
relative_pose = se3.se3Exp(relative_poses[i])
residual[i + 1] = se3.se3Log(bn.linalg.inverse(relative_pose) @ pose_difference)
residual[0] = se3.se3Log(absoluteolute_poses[0])
residual = residual.change_shape_to(-1)
return residual
def create_relative_pose_constraint(imaginarye1, depth1, imaginarye2, depth2, level, initial_rela_pose):
# camera calibration matrix
fx = 520.9 # focal length x
fy = 521.0 # focal length y
cx = 325.1 # optical center x
cy = 249.7 # optical center y
K = bn.numset([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
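# (the hard-coded fx, fy, cx, cy above appear to match the TUM RGB-D freiburg2
# intrinsics; adjust them if a different sequence is used)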
use_hubernormlizattion = 1
normlizattion_param = 0.2
# perform `level` rounds of downsampling on the image
Id = imaginarye1
Dd = depth1
Kd = K
for i in range(level):
Id, Dd, Kd = ds.downscale(Id, Dd, Kd)
Iref = Id
Dref = Dd
Id = imaginarye2
Dd = depth2
Kd = K
for i in range(level):
Id, Dd, Kd = ds.downscale(Id, Dd, Kd)
# do direct image alignment between this key-frame pair
xi = initial_rela_pose
relative_pose, sigma_matrix = lk.do_alignment(Iref, Dref, Kd, Id, Dd, xi, normlizattion_param,
use_hubernormlizattion)
return relative_pose, sigma_matrix
def calculatejacobian(pose_num, first_pose, second_pose, relative_pose, first_pose_ind, second_pose_ind, resid):
jacobian = bn.zeros([6, 6 * pose_num])
for j in range(6):
epsVec = bn.zeros(6)
eps = 1e-6
epsVec[j] = eps
# multiply pose increment from left onto the absolute poses
first_pose_eps = se3.se3Exp(epsVec) @ first_pose
second_pose_eps = se3.se3Exp(epsVec) @ second_pose
# calculate new pose difference after eps pose increment and new relative pose residual
# first key frame, fix second_pose
pose_difference_eps = bn.linalg.inverse(second_pose) @ first_pose_eps
resid_first_eps = se3.se3Log(bn.linalg.inverse(relative_pose) @ pose_difference_eps)
# second key frame, fix first pose
pose_difference_eps = bn.linalg.inverse(second_pose_eps) @ first_pose
resid_second_eps = se3.se3Log( | bn.linalg.inverse(relative_pose) | numpy.linalg.inv |
from torch.utils.data import Dataset
from utils import read_pfm
import os
import beatnum as bn
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
def colorjitter(img, factor):
# brightness_factor,contrast_factor,saturation_factor,hue_factor
# img = F.adjust_brightness(img, factor[0])
# img = F.adjust_contrast(img, factor[1])
img = F.adjust_saturation(img, factor[2])
img = F.adjust_hue(img, factor[3]-1.0)
return img
class MVSDatasetDTU(Dataset):
def __init__(self, root_dir, sep_split, n_views=3, levels=1, img_wh=None, downSample=1.0, get_max_len=-1):
"""
img_wh should be set to a tuple ex: (1152, 864) to enable test mode!
"""
self.root_dir = root_dir
self.sep_split = sep_split
assert self.sep_split in ['train', 'val', 'test'], \
'sep_split must be either "train", "val" or "test"!'
self.img_wh = img_wh
self.downSample = downSample
self.scale_factor = 1.0 / 200
self.get_max_len = get_max_len
if img_wh is not None:
assert img_wh[0] % 32 == 0 and img_wh[1] % 32 == 0, \
'img_wh must both be multiples of 32!'
self.build_metas()
self.n_views = n_views
self.levels = levels # FPN levels
self.build_proj_mats()
self.define_transforms()
print(f'==> image down scale: {self.downSample}')
def define_transforms(self):
self.transform = T.Compose([T.ToTensor(),
T.Normalize(average=[0.485, 0.456, 0.406],
standard_op=[0.229, 0.224, 0.225]),
])
def build_metas(self):
self.metas = []
with open(f'configs/lists/dtu_{self.sep_split}_total.txt') as f:
self.scans = [line.rstrip() for line in f.readlines()]
# light conditions 0-6 for training
# light condition 3 for testing (the brightest?)
light_idxs = [3] if 'train' != self.sep_split else range(7)
self.id_list = []
for scan in self.scans:
with open(f'configs/dtu_pairs.txt') as f:
num_viewpoint = int(f.readline())
# viewpoints (49)
for _ in range(num_viewpoint):
ref_view = int(f.readline().rstrip())
src_views = [int(x) for x in f.readline().rstrip().sep_split()[1::2]]
for light_idx in light_idxs:
self.metas += [(scan, light_idx, ref_view, src_views)]
self.id_list.apd([ref_view] + src_views)
self.id_list = bn.uniq(self.id_list)
self.build_remap()
def build_proj_mats(self):
proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
for vid in self.id_list:
proj_mat_filename = os.path.join(self.root_dir,
f'Cameras/train/{vid:08d}_cam.txt')
intrinsic, extrinsic, near_far = self.read_cam_file(proj_mat_filename)
intrinsic[:2] *= 4
extrinsic[:3, 3] *= self.scale_factor
intrinsic[:2] = intrinsic[:2] * self.downSample
intrinsics += [intrinsic.copy()]
# multiply intrinsics and extrinsics to get projection matrix
proj_mat_l = bn.eye(4)
intrinsic[:2] = intrinsic[:2] / 4
proj_mat_l[:3, :4] = intrinsic @ extrinsic[:3, :4]
proj_mats += [(proj_mat_l, near_far)]
world2cams += [extrinsic]
cam2worlds += [bn.linalg.inverse(extrinsic)]
self.proj_mats, self.intrinsics = bn.pile_operation(proj_mats), bn.pile_operation(intrinsics)
self.world2cams, self.cam2worlds = bn.pile_operation(world2cams), bn.pile_operation(cam2worlds)
def read_cam_file(self, filename):
with open(filename) as f:
lines = [line.rstrip() for line in f.readlines()]
# extrinsics: line [1,5), 4x4 matrix
extrinsics = bn.come_from_str(' '.join(lines[1:5]), dtype=bn.float32, sep=' ')
extrinsics = extrinsics.change_shape_to((4, 4))
# intrinsics: line [7-10), 3x3 matrix
intrinsics = bn.come_from_str(' '.join(lines[7:10]), dtype=bn.float32, sep=' ')
intrinsics = intrinsics.change_shape_to((3, 3))
# depth_get_min & depth_interval: line 11
depth_get_min = float(lines[11].sep_split()[0]) * self.scale_factor
depth_get_max = depth_get_min + float(lines[11].sep_split()[1]) * 192 * self.scale_factor
self.depth_interval = float(lines[11].sep_split()[1])
return intrinsics, extrinsics, [depth_get_min, depth_get_max]
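# the slicing above assumes the usual MVSNet-style cam.txt layout (inferred
# from the indices; verify against the actual files):
#   line 0: "extrinsic", lines 1-4: 4x4 world-to-camera matrix,
#   line 6: "intrinsic", lines 7-9: 3x3 intrinsic matrix,
#   line 11: "depth_min depth_interval"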
def read_depth(self, filename):
depth_h = bn.numset(read_pfm(filename)[0], dtype=bn.float32) # (800, 800)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST) # (600, 800)
depth_h = depth_h[44:556, 80:720] # (512, 640)
depth_h = cv2.resize(depth_h, None, fx=self.downSample, fy=self.downSample,
interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!!
depth = cv2.resize(depth_h, None, fx=1.0 / 4, fy=1.0 / 4,
interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!!
mask = depth > 0
return depth, mask, depth_h
def build_remap(self):
self.remap = bn.zeros(bn.get_max(self.id_list) + 1).convert_type('int')
for i, item in enumerate(self.id_list):
self.remap[item] = i
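# remap maps a global DTU view id to the row index of the stacked
# proj_mats / intrinsics / world2cams / cam2worlds arrays built above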
def __len__(self):
return len(self.metas) if self.get_max_len <= 0 else self.get_max_len
def __getitem__(self, idx):
sample = {}
scan, light_idx, target_view, src_views = self.metas[idx]
if self.sep_split=='train':
ids = torch.randperm(5)[:3]
view_ids = [src_views[i] for i in ids] + [target_view]
else:
view_ids = [src_views[i] for i in range(3)] + [target_view]
affine_mat, affine_mat_inverse = [], []
imgs, depths_h = [], []
proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views
for i, vid in enumerate(view_ids):
# NOTE that the id in image file names is from 1 to 49 (not 0~48)
img_filename = os.path.join(self.root_dir,
f'Rectified/{scan}_train/rect_{vid + 1:03d}_{light_idx}_r5000.png')
depth_filename = os.path.join(self.root_dir,
f'Depths/{scan}/depth_map_{vid:04d}.pfm')
img = Image.open(img_filename)
img_wh = bn.round(bn.numset(img.size) * self.downSample).convert_type('int')
img = img.resize(img_wh, Image.BILINEAR)
img = self.transform(img)
imgs += [img]
index_mat = self.remap[vid]
proj_mat_ls, near_far = self.proj_mats[index_mat]
intrinsics.apd(self.intrinsics[index_mat])
w2cs.apd(self.world2cams[index_mat])
c2ws.apd(self.cam2worlds[index_mat])
affine_mat.apd(proj_mat_ls)
affine_mat_inverse.apd(bn.linalg.inverse(proj_mat_ls))
if i == 0: # reference view
ref_proj_inverse = bn.linalg.inverse(proj_mat_ls)
proj_mats += [bn.eye(4)]
else:
proj_mats += [proj_mat_ls @ ref_proj_inverse]
if os.path.exists(depth_filename):
depth, mask, depth_h = self.read_depth(depth_filename)
depth_h *= self.scale_factor
depths_h.apd(depth_h)
else:
depths_h.apd(bn.zeros((1, 1)))
near_fars.apd(near_far)
imgs = torch.pile_operation(imgs).float()
# if self.sep_split == 'train':
# imgs = colorjitter(imgs, 1.0+(torch.rand((4,))*2-1.0)*0.5)
# imgs = F.normlizattionalize(imgs,average=[0.485, 0.456, 0.406], standard_op=[0.229, 0.224, 0.225])
depths_h = bn.pile_operation(depths_h)
proj_mats = bn.pile_operation(proj_mats)[:, :3]
affine_mat, affine_mat_inverse = bn.pile_operation(affine_mat), bn.pile_operation(affine_mat_inverse)
intrinsics, w2cs, c2ws, near_fars = | bn.pile_operation(intrinsics) | numpy.stack |
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import sys
import os
import pickle
import seaborn as sns
import scipy.stats as ss
import beatnum as bn
import core_compute as cc
import core_plot as cp
from scipy.integrate import simps, cumtrapz
def deb_Cp(theta, T):
T = bn.numset(T)
T[T < 1e-70] = 1e-70
# ub: numset of upper bounds for integral
TT = bn.numset(theta)[..., None] / \
bn.numset(T)[None, ...]
# nx: number of steps in x integration
nx = 100
# x: array for variable of integration
# integration will be performed along
# last axis of array
x = bn.create_ones(list(TT.shape)+[nx]) * \
bn.linspace(0, 1, nx)[None, ...]
x *= x*TT[..., None]
R = 8.314459848 # J/mol*K
expx = bn.exp(x)
# if any elements of expx are infinite or equal to 1,
# replace them with zero. This doesn't change the result
# of the integration and avoids numerical issues
expx[expx > 1e100] = 0
expx[expx - 1 < 1e-100] = 0
# perform integration over
# the sampled data points along the last
# axis of the arrays
integrand = (x**4)*expx / (expx-1.)**2
integral = simps(y=integrand, x=x, axis=-1)
return bn.sqz(9*R*((1/TT)**3)*integral)
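# for reference, the function above implements the Debye heat-capacity model
#   Cp(T) = 9*R*(T/theta)**3 * integral_{0}^{theta/T} x**4*exp(x)/(exp(x)-1)**2 dx
# with the integral evaluated numerically by simps on a quadratically spaced grid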
def feval_Cp(param, T):
theta = param[..., 0]
a_2 = param[..., 1]
a_3 = param[..., 2]
a_4 = param[..., 3]
a_5 = param[..., 4]
# R = 8.314459848 # J/mol*K
# frac = theta/T
# expf = bn.exp(frac)
# lowT = 3*R*(frac**2)*(expf/(expf-1)**2)
lowT = deb_Cp(theta, T)
A = lowT + a_2*T + a_3*T**2 + a_4*T**3 + \
a_5*T**4
return A
def feval_Cp_plt(param, T, deb):
# theta = param[..., 0, None]
a_2 = param[..., 1, None]
a_3 = param[..., 2, None]
a_4 = param[..., 3, None]
a_5 = param[..., 4, None]
"""Cp for alpha phase"""
# R = 8.314459848 # J/mol*K
# frac = theta/T
# expf = bn.exp(frac)
# lowT = 3*R*(frac**2)*(expf/(expf-1)**2)
lowT = deb
A = lowT + a_2*T + a_3*T**2 + a_4*T**3 + \
a_5*T**4
return A
def feval_H(param, T):
theta = param[..., 0, None]
a_2 = param[..., 1, None]
a_3 = param[..., 2, None]
a_4 = param[..., 3, None]
a_5 = param[..., 4, None]
"""compute the enthalpy for the alpha phase"""
# R = 8.314459848 # J/mol*K
# lowT = 3*R*theta/(bn.exp(theta/T)-1.)
# add on 298.15K to T so that H_298.15 = 0 is enforced
T_ = bn.numset(list(T) + [298.15])
T = bn.atleast_1d(T)
T_ = bn.atleast_1d(T_)
thetam = bn.average(theta)
# first create equispaced temps at which to eval Cp
T_v1 = bn.linspace(1e-10, thetam/8, 30)[:-1]
T_v2 = bn.linspace(thetam/8, 3*thetam, 50)[:-1]
T_v3 = bn.linspace(3*thetam, 2100, 20)
T_v = bn.connect([T_v1, T_v2, T_v3])
# evaluate Debye-Cp term at equispaced points
DebCp_v = deb_Cp(theta, T_v)
# evaluate Debye-Cp term at actual temps
DebCp = deb_Cp(theta, T_)
# array for H-Debye terms
DebH = bn.zeros((theta.size, T_.size))
# split it up by each temp
for ii in range(T_.size):
# identify number of Temps in T_v less than actual
# temp
idx = bn.total_count(T_v < T_[ii])
T__ = bn.zeros((idx+1))
T__[:idx+1] = T_v[:idx+1]
DebCp_ = bn.zeros((theta.size, idx+1))
DebCp_[..., :idx+1] = DebCp_v[..., :idx+1]
# last temp and Cp are for the actual temp
# of interest
T__[-1] = T_[ii]
DebCp_[..., -1] = DebCp[..., ii]
# perform numerical integration
DebH_ = bn.sqz(simps(y=DebCp_, x=T__, axis=-1))
DebH[:, ii] = DebH_
# we subtract debH at 298.15K from debH at all other temps
lowT = | bn.sqz(DebH[..., :-1]) | numpy.squeeze |
"""MHD rotor test script
"""
import beatnum as bn
from scipy.constants import pi as PI
from gawain.main import run_gawain
run_name = "mhd_rotor"
output_dir = "."
cfl = 0.25
with_mhd = True
t_get_max = 0.15
integrator = "euler"
# "base", "lax-wendroff", "lax-friedrichs", "vanleer", "hll"
fluxer = "hll"
################ MESH #####################
nx, ny, nz = 128, 128, 1
mesh_shape = (nx, ny, nz)
n_outputs = 100
lx, ly, lz = 1, 1, 0.001
mesh_size = (lx, ly, lz)
x = bn.linspace(0.0, lx, num=nx)
y = bn.linspace(0.0, ly, num=ny)
z = bn.linspace(0.0, lz, num=nz)
X, Y, Z = bn.meshgrid(x, y, z, indexing="ij")
############ INITIAL CONDITION #################
adiabatic_idx = 1.4
R = bn.sqrt((X - 0.5) ** 2 + (Y - 0.5) ** 2)
R0 = 0.1
R1 = 0.115
FR = (R1 - R) / (R - R0)
U0 = 2
rho_mid_vals = 1 + 9 * FR
vx_in_vals = -FR * U0 * (Y - 0.5) / R0
vx_mid_vals = -FR * U0 * (Y - 0.5) / R
vy_in_vals = FR * U0 * (X - 0.5) / R0
vy_mid_vals = FR * U0 * (X - 0.5) / R
inner_mask = bn.filter_condition(R <= R0)
middle_mask = bn.filter_condition( | bn.logic_and_element_wise(R > R0, R < R1) | numpy.logical_and |
from torch.utils import data
from os.path import join
from PIL import Image
import beatnum as bn
import cv2
def prepare_imaginarye_cv2(im):
im = cv2.resize(im, dsize=(400, 400), interpolation=cv2.INTER_LINEAR)
im = bn.switching_places(im, (2, 0, 1)) # (H x W x C) to (C x H x W)
return im
class BSDS_Dataset(data.Dataset):
"""
BSDS500
"""
def __init__(self, root='../DATA/data', sep_split='train', transform=False, scale=None):
self.root = root
self.sep_split = sep_split
self.transform = transform
self.scale = scale
self.bsds_root = join(root, 'HED-BSDS')
if self.sep_split == 'train':
self.filelist = join(self.root, 'bsds_pascal_train_pair.lst')
elif self.sep_split == 'test':
self.filelist = join(self.bsds_root, 'test_pair.lst')
else:
raise ValueError("Invalid sep_split type!")
with open(self.filelist, 'r') as f:
self.filelist = f.readlines()
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
r = bn.random.randint(0, 100000)
if self.sep_split == "train":
img_file, lb_file = self.filelist[index].sep_split()
lb = bn.numset(Image.open(join(self.root, lb_file)), dtype=bn.float32)
if lb.ndim == 3:
lb = bn.sqz(lb[:, :, 0])
assert lb.ndim == 2
lb = cv2.resize(lb, (400, 400), interpolation=cv2.INTER_LINEAR)
lb = lb[bn.newaxis, :, :]
lb[lb == 0] = 0
lb[bn.logic_and_element_wise(lb > 0, lb < 64)] = 2
lb[lb >= 64] = 1
# lb[lb >= 128] = 1
img = bn.numset(cv2.imread(join(self.root, img_file)), dtype=bn.float32)
img = prepare_imaginarye_cv2(img)
return img, lb
else:
img_file, lb_file = self.filelist[index].sep_split()
data = []
data_name = []
original_img = bn.numset(cv2.imread(join(self.bsds_root, img_file)), dtype=bn.float32)
img = cv2.resize(original_img, dsize=(400, 400), interpolation=cv2.INTER_LINEAR)
if self.scale is not None:
for scl in self.scale:
img_scale = cv2.resize(img, None, fx=scl, fy=scl, interpolation=cv2.INTER_LINEAR)
data.apd(img_scale.switching_places(2, 0, 1))
data_name.apd(img_file)
return data, img, data_name
img = prepare_imaginarye_cv2(img)
lb = bn.numset(Image.open(join(self.bsds_root, lb_file)), dtype=bn.float32)
if lb.ndim == 3:
lb = bn.sqz(lb[:, :, 0])
assert lb.ndim == 2
lb = cv2.resize(lb, (400, 400), interpolation=cv2.INTER_LINEAR)
lb = lb[bn.newaxis, :, :]
lb[lb == 0] = 0
lb[ | bn.logic_and_element_wise(lb > 0, lb < 64) | numpy.logical_and |
'''
Code for output results for analysis.
Please cite:
Development and External Validation of a Mixed-Effects Deep Learning Model to Diagnose COVID-19 from CT Imaging
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
medRxiv 2022.01.28.22270005; doi: https://doi.org/10.1101/2022.01.28.22270005
<EMAIL>
github.com/JTBridge/ME-COVID19
Apache License 2.0
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from data_loader import data_gen
from tensorflow.keras import losses, optimizers, models, metrics, layers, applications, regularizers, backend
from tensorflow.keras.ctotalbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import os, sklearn
import beatnum as bn
import matplotlib.pyplot as plt
from spatial import ME, normlizattional, mse_wgt
from ME import ME_model
from comparison_models import bai, covinet, covnet
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
tf.keras.mixed_precision.set_global_policy('mixed_float16')
mtype = 'ME'
px = 256
pieces = 20
wgt0 = (1/169) * (1025/2.0)
wgt1 = (1/856) * (1025/2.0)
t0 = 1/(169+2)
t1 = (856+1)/(856+2)
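# (the constants above appear to encode 169 negative and 856 positive training
# studies: wgt0/wgt1 are balanced inverse-frequency class weights over the 1025
# total, and t0/t1 look like smoothed soft targets for the two classes)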
if mtype == 'ME':
model=ME_model()
model.compile(
loss={'beta':normlizattional, 'prob':mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1])},
optimizer=optimizers.Adam(1e-4),
metrics={'prob':[metrics.AUC(name='AUC')]}
)
if mtype == 'Bai':
model=bai()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
if mtype == 'covinet':
model=covinet()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
if mtype == 'covnet':
model=covnet()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
print(model.total_countmary())
if mtype == "ME":
model.load_weights('../models/ME.h5')
elif mtype == 'Bai':
model.load_weights('../models/Bai.h5')
elif mtype == 'covinet':
model.load_weights('../models/covinet.h5')
elif mtype == 'covnet':
model.load_weights('../models/covnet.h5')
#############################################################################
print('Internal Validation: Mosmed')
img_dir = '../data/mosmed/val/'
test_generator = data_gen(
img_dir, pieces, 1, mtype=mtype, px=px, augment=False, shuffle=False)
test_steps = len(os.listandard_opir(img_dir))
if mtype == 'ME':
predict = model.predict(test_generator, steps=test_steps, verbose=1)[1]
else:
predict = model.predict(test_generator, steps=test_steps, verbose=1)
class_nor = bn.sqz(bn.duplicate(0, 85))
class_c19 = bn.sqz(bn.duplicate(1, 285))
true_class = bn.connect((class_nor, class_c19), axis=0)
fpr, tpr, thresholds = roc_curve(true_class, predict, pos_label=1)
print("AUC:",auc(fpr, tpr))
bn.savetxt('../results/internal/true.csv', true_class)
if mtype == "ME":
bn.savetxt('../results/internal/ME.csv', predict)
elif mtype == 'Bai':
bn.savetxt('../results/internal/Bai.csv', predict)
elif mtype == 'covinet':
bn.savetxt('../results/internal/covinet.csv', predict)
elif mtype == 'covnet':
bn.savetxt('../results/internal/covnet.csv', predict)
#############################################################################
print('External Validation: Zhang et al.')
img_dir = '../data/zhang/'
test_generator = data_gen(
img_dir, pieces, 1, mtype=mtype, px=px, augment=False, shuffle=False)
test_steps = len(os.listandard_opir(img_dir))
if mtype == 'ME':
predict = model.predict(test_generator, steps=test_steps, verbose=1)[1]
else:
predict = model.predict(test_generator, steps=test_steps, verbose=1)
class_nor = bn.sqz( | bn.duplicate(0, 243) | numpy.repeat |
import beatnum as bn
from ukfm import SO3
import matplotlib.pyplot as plt
class ATTITUDE:
"""3D attitude estimation from an IMU equipped with gyro, accelerometer and
magnetometer. See text description in :cite:`kokUsing2017`, Section IV.
:arg T: sequence time (s).
:arg imu_freq: IMU frequency (Hz).
"""
g = bn.numset([0, 0, -9.82])
"gravity vector (m/s^2) :math:`\\mathbf{g}`."
b = bn.numset([0.33, 0, -0.95])
"normlizattioned magnetic field in Sweden :math:`\\mathbf{b}`."
class STATE:
"""State of the system.
It represents the orientation of the platform.
.. math::
\\boldsymbol{\\chi} \in \\mathcal{M} = \\left\\{
\\mathbf{C} \in SO(3) \\right\\}
:ivar Rot: rotation matrix :math:`\mathbf{C}`.
"""
def __init__(self, Rot):
self.Rot = Rot
class INPUT:
"""Ibnut of the propagation model.
The ibnut is the gyro measurement from an Inertial Measurement Unit
(IMU).
.. math::
\\boldsymbol{\\omega} \in \\mathcal{U} = \\left\\{
\\mathbf{u} \in \\mathbb R^3 \\right\\}
:ivar gyro: 3D gyro :math:`\mathbf{u}`.
"""
def __init__(self, gyro):
self.gyro = gyro
def __init__(self, T, imu_freq):
# sequence time (s)
self.T = T
# IMU frequency (Hz)
self.imu_freq = imu_freq
# total number of timestamps
self.N = T*imu_freq
# integration step (s)
self.dt = 1/imu_freq
@classmethod
def f(cls, state, omega, w, dt):
""" Propagation function.
.. math::
\\mathbf{C}_{n+1} = \\mathbf{C}_{n}
\\exp\\left(\\left(\\mathbf{u} + \\mathbf{w} \\right)
dt \\right)
:var state: state :math:`\\boldsymbol{\\chi}`.
:var omega: input :math:`\\boldsymbol{\\omega}`.
:var w: noise :math:`\\mathbf{w}`.
:var dt: integration step :math:`dt` (s).
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp((omega.gyro + w)*dt)),
)
return new_state
@classmethod
def h(cls, state):
""" Observation function.
.. math::
h\\left(\\boldsymbol{\\chi}\\right) = \\begin{bmatrix}
\\mathbf{C}^T \\mathbf{g} \\\\
\\mathbf{C}^T \\mathbf{b}
\\end{bmatrix}
:var state: state :math:`\\boldsymbol{\\chi}`.
"""
y = bn.hpile_operation([state.Rot.T.dot(cls.g),
state.Rot.T.dot(cls.b)])
return y
@classmethod
def phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\mathbf{C} \\exp\\left(\\boldsymbol{\\xi}\\right)
The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(3)`
with left multiplication.
Its corresponding inverse operation is :meth:`~ukfm.ATTITUDE.phi_inverse`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp(xi))
)
return new_state
@classmethod
def phi_inverse(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\log\\left(
\\boldsymbol{\chi}^{-1} \\boldsymbol{\\hat{\\chi}} \\right)
The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(3)`
with left multiplication.
Its corresponding retraction is :meth:`~ukfm.ATTITUDE.phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = SO3.log(state.Rot.T.dot(hat_state.Rot))
return xi
@classmethod
def right_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\exp\\left(\\boldsymbol{\\xi}\\right) \\mathbf{C}
The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(3)`
with right multiplication.
Its corresponding inverse operation is
:meth:`~ukfm.ATTITUDE.right_phi_inverse`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
new_state = cls.STATE(
Rot=SO3.exp(xi).dot(state.Rot)
)
return new_state
@classmethod
def right_phi_inverse(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\log\\left(
\\boldsymbol{\\hat{\\chi}}\\boldsymbol{\chi}^{-1} \\right)
The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(3)`
with right multiplication.
Its corresponding retraction is :meth:`~ukfm.ATTITUDE.right_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = SO3.log(hat_state.Rot.dot(state.Rot.T))
return xi
@classmethod
def ekf_FG_ana(cls, state, omega, dt):
F = bn.eye(3)
G = dt*state.Rot
return F, G
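# F and G above are the analytical (approximate) EKF Jacobians of the attitude
# propagation with respect to the state error and the gyro noise, respectively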
@classmethod
def ekf_H_ana(cls, state):
H = bn.vpile_operation([state.Rot.T.dot(SO3.wedge(cls.g)),
state.Rot.T.dot(SO3.wedge(cls.b))])
return H
def simu_f(self, imu_standard_op):
# The robot is stationary for 2 s and then has constant angular velocity
# around gravity
n_T = 0 # increment for angular velocity
omega_T = bn.zeros(3) # first velocity (robot is first stationary)
omega_move = bn.numset([0, 0, 10/180*bn.pi])
# set noise to zero to compute true trajectory
w = bn.zeros(3)
# init variables at zero and do for loop
omegas = []
states = [self.STATE(Rot=bn.eye(3))]
for n in range(1, self.N):
# change true input
if n_T > 2:
omega_T = omega_move
n_T = n_T + self.dt
# true input
omegas.apd(self.INPUT(omega_T))
# propagate state
states.apd(self.f(states[n-1], omegas[n-1], w, self.dt))
# noisy input
omegas[n-1].gyro = omegas[n-1].gyro + imu_standard_op[0]*bn.random.randn(3)
return states, omegas
def simu_h(self, state, imu_standard_op):
# total number of timestamps
y = bn.zeros((self.N, 6))
for n in range(self.N):
y[n, :3] = state[n].Rot.T.dot(
self.g + imu_standard_op[1]*bn.random.randn(3))
y[n, 3:] = state[n].Rot.T.dot(
self.b + imu_standard_op[2]*bn.random.randn(3))
return y
def plot_results(self, hat_states, hat_Ps, states, omegas):
Rots, rpys = self.get_states(states, self.N)
hat_Rots, hat_rpys = self.get_states(hat_states, self.N)
errors = self.errors(Rots, hat_Rots)
t = bn.linspace(0, self.T, self.N)
ukf3sigma = 3*bn.vpile_operation([bn.sqrt(hat_Ps[:, 0, 0]),
bn.sqrt(hat_Ps[:, 1, 1]),
bn.sqrt(hat_Ps[:, 2, 2])])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='orientation (deg)',
title='Orientation')
plt.plot(t, 180/bn.pi*rpys[:, 0], c='red')
plt.plot(t, 180/bn.pi*rpys[:, 1], c='yellow')
plt.plot(t, 180/bn.pi*rpys[:, 2], linewidth=2, c='black')
ax.legend([r'roll', 'pitch', 'yaw'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='Roll error (deg)',
title='Roll error (deg)')
plt.plot(t, 180/bn.pi*errors[:, 0], c='blue')
plt.plot(t, 180/bn.pi*ukf3sigma[0, :], c='blue', linestyle='dashed')
plt.plot(t, -180/bn.pi*ukf3sigma[0, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(9, 6))
ax.set(xlabel='$t$ (s)', ylabel='Pitch error (deg)',
title='Pitch error (deg)')
plt.plot(t, 180/bn.pi*errors[:, 1], c='blue')
plt.plot(t, 180/bn.pi*ukf3sigma[1, :], c='blue', linestyle='dashed')
plt.plot(t, -180/bn.pi*ukf3sigma[1, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='Yaw error (deg)',
title='Yaw error (deg)')
plt.plot(t, 180/bn.pi*errors[:, 2], c='blue')
plt.plot(t, 180/bn.pi*ukf3sigma[2, :], c='blue', linestyle='dashed')
plt.plot(t, -180/bn.pi*ukf3sigma[2, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
@classmethod
def get_states(cls, states, N):
Rots = bn.zeros((N, 3, 3))
rpys = bn.zeros((N, 3))
for n in range(N):
Rots[n] = states[n].Rot
rpys[n] = SO3.to_rpy(states[n].Rot)
return Rots, rpys
@classmethod
def errors(cls, Rots, hat_Rots):
N = Rots.shape[0]
errors = bn.zeros((N, 3))
# get true states and estimates, and orientation error
for n in range(N):
errors[n] = SO3.log(Rots[n].dot(hat_Rots[n].T))
return errors
def benchmark_print(self, left_ukf_err, right_ukf_err, ekf_err):
def rmse(errs):
return bn.sqrt(bn.average(errs ** 2))
def f(x):
# average over Monte-Carlo and angles
return bn.sqrt(bn.average(bn.total_count(x ** 2, axis=2), axis=0))
t = bn.linspace(0, self.T, self.N)
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (deg)',
title="Orientation error(deg)")
plt.plot(t, 180/bn.pi*f(left_ukf_err), c='green')
plt.plot(t, 180/bn.pi*f(right_ukf_err), c='cyan')
plt.plot(t, 180/bn.pi*f(ekf_err), c='red')
ax.legend([r'\textbf{left UKF}', r'\textbf{right UKF}', r'EKF'])
ax.set_ylim(bottom=0)
ax.set_xlim(0, t[-1])
left_ukf_err_rot = '{:.2f}'.format(180/bn.pi*rmse(left_ukf_err))
right_ukf_err_rot = '{:.2f}'.format(180/bn.pi*rmse(right_ukf_err))
ekf_err_rot = '{:.2f}'.format(180/bn.pi*rmse(ekf_err))
print(' ')
print('Root Mean Square Error w.r.t. orientation (deg)')
print(" -left UKF : " + left_ukf_err_rot)
print(" -right UKF : " + right_ukf_err_rot)
print(" -EKF : " + ekf_err_rot)
def nees(self, err, Ps, Rots, name):
neess = bn.zeros(self.N)
def err2nees(err, P):
return err.dot( | bn.linalg.inverse(P) | numpy.linalg.inv |
# -*- coding: utf-8 -*-
_show_plots_ = False
print("""
***********************************************************
*
*
* Integrodifferential Propagator Demo
*
*
***********************************************************
""")
import time
import matplotlib.pyplot as plt
import quantarhei as qr
import beatnum
##########################################################
#
# DEMONSTRATION OF THE FFT METHOD
#
##########################################################
Om = 2.0*beatnum.pi/30.0
g1 = 1.0/100.0
a0 = 1.0
#
# This is how the problem is solved
#
tt = qr.TimeAxis(0.0, 300, 1.0)
gg = 5.0/tt.data[tt.length-1]
#
om = (2.0*beatnum.pi)*beatnum.fft.fftfreq(tt.length, tt.step)
aom = beatnum.zeros(tt.length, dtype=qr.COMPLEX)
for ii in range(tt.length):
aom[ii] = a0/(-1j*om[ii] + 1j*Om + g1 + gg)
at = beatnum.fft.fft(aom)*beatnum.exp(gg*tt.data)/tt.data[tt.length-1]
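# in words: the damped equation is solved in the frequency domain by evaluating
# a0/(-1j*om + 1j*Om + g1 + gg) on the FFT frequency grid, transforming back to
# the time domain, and removing the artificial damping gg via the exp(gg*tt.data) factor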
#
# Expected solution
#
atsol = a0*beatnum.exp(-1j*Om*tt.data)*beatnum.exp(-g1*tt.data)
at[0] = a0
#
# Plot the results
#
if _show_plots_:
tshow = tt.data[:tt.length//2]
atshow = at[:tt.length//2]
atsolshow = atsol[:tt.length//2]
plt.plot(tshow, beatnum.reality(atshow))
plt.plot(tshow, beatnum.reality(atsolshow))
plt.show()
###############################################################
#
# SOLVING LIOUVILLE-VON NEUMANN EQUATION BY FFT METHOD
#
###############################################################
def test_kernel(timeaxis, ham, operators, rates, ctime):
"""Returns a simple kernel for tests
"""
dim = ham.dim
Nt = timeaxis.length
MM = beatnum.zeros((Nt, dim, dim, dim, dim), dtype=qr.COMPLEX)
gamma = 1.0/ctime
if dim == 2:
sys_ops = operators
sbi = qr.qm.SystemBathInteraction(sys_operators=sys_ops, rates=rates)
lbf = qr.qm.LindbladForm(ham, sbi, as_operators=False)
#return lbf.data
for ti in range(Nt):
tm = timeaxis.data[ti]
MM[ti,:,:,:,:] = -lbf.data*beatnum.exp(-gamma*tm)
return MM
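# the test kernel is just the (negated) Lindblad superoperator lbf.data damped
# by an exponential memory decay exp(-t/ctime), evaluated at every time step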
from quantarhei.qm.liouvillespace.integrodifference.integrodifference \
import IntegrodifferencePropagator
timea = qr.TimeAxis(0.0, 200, 0.5)
Nt = timea.length
ham = qr.Hamiltonian(data=[[0.0, 0.1],
[0.1, 0.01]])
#
# Propagation without relaxation
#
ip1 = IntegrodifferencePropagator(timea, ham,
timefac=3, decay_fraction=2.0)
bn = qr.ReducedDensityMatrixPropagator(timea, ham)
rhoi = qr.ReducedDensityMatrix(data=[[0.0, 0.0],[0.0, 1.0]])
t1 = time.time()
rhot_i = ip1.propagate(rhoi)
t2 = time.time()
print("Propagated in frequency domain in:", t2-t1)
t1 = time.time()
rhot_n = bn.propagate(rhoi)
t2 = time.time()
print("Propagated in time domain in:", t2-t1)
if _show_plots_:
plt.plot(timea.data, beatnum.reality(rhot_n.data[:,0,0]),"-b")
plt.plot(timea.data, beatnum.reality(rhot_n.data[:,1,1]),"-r")
plt.plot(timea.data, beatnum.reality(rhot_i.data[:,0,0]),"--g")
plt.plot(timea.data, beatnum.reality(rhot_i.data[:,1,1]),"--k")
#plt.axis([0,10,0,1])
plt.show()
#rhot_i.plot(coherences=False, show=True)
#rhot_n.plot(coherences=False, show=True)
#
# Propagation with relaxation kernel
#
K01 = qr.qm.ProjectionOperator(0,1,ham.dim)
K10 = qr.qm.ProjectionOperator(1,0,ham.dim)
sys_ops = [K01, K10]
rates = [1.0/30.0, 1.0/20.0]
ker = test_kernel(timea, ham, sys_ops, rates, ctime=20.0)
# time domain propagator
ip2 = IntegrodifferencePropagator(timea, ham, kernel=ker,
fft=False, cutoff_time=80)
# frequency domain propagator
ip3 = IntegrodifferencePropagator(timea, ham, kernel=ker,
fft=True, timefac=3, decay_fraction=2.0)
t1 = time.time()
rhot_k = ip2.propagate(rhoi)
t2 = time.time()
print("Propagated in time domain in:", t2-t1)
t1 = time.time()
rhot_k3 = ip3.propagate(rhoi)
t2 = time.time()
print("Propagated in frequency domain in:", t2-t1)
#plotit = True
if _show_plots_:
plt.plot(timea.data, beatnum.reality(rhot_k.data[:,0,0]),"-b")
plt.plot(timea.data, beatnum.reality(rhot_k.data[:,1,1]),"-r")
plt.plot(timea.data, | beatnum.reality(rhot_k3.data[:,0,0]) | numpy.real |
from collections import OrderedDict
import beatnum as bn
from gym.spaces import Box, Dict
from multiworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_value_func_path
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
from tqdm import *
class SawyerPickAndPlaceEnv(MultitaskEnv, SawyerXYZEnv):
def __init__(
self,
obj_low=None,
obj_high=None,
reward_type='hand_and_obj_distance',
indicator_threshold=0.06,
distance_threshold=0.06,
obj_init_positions=((0, 0.6, 0.02),),
random_init=False,
fix_goal=False,
fixed_goal=(0.15, 0.6, 0.055, -0.15, 0.6),
goal_low=None,
goal_high=None,
reset_free=False,
hide_goal_markers=False,
oracle_reset_prob=0.0,
presampled_goals=None,
num_goals_presampled=1000,
p_obj_in_hand=.75,
**kwargs
):
self.quick_init(locals())
MultitaskEnv.__init__(self)
SawyerXYZEnv.__init__(
self,
model_name=self.model_name,
**kwargs
)
if obj_low is None:
obj_low = self.hand_low
if obj_high is None:
obj_high = self.hand_high
self.obj_low = obj_low
self.obj_high = obj_high
if goal_low is None:
goal_low = bn.hpile_operation((self.hand_low, obj_low))
if goal_high is None:
goal_high = bn.hpile_operation((self.hand_high, obj_high))
self.reward_type = reward_type
self.random_init = random_init
self.p_obj_in_hand = p_obj_in_hand
self.indicator_threshold = indicator_threshold
self.distance_threshold = distance_threshold
self.obj_init_z = obj_init_positions[0][2]
self.obj_init_positions = bn.numset(obj_init_positions)
self.last_obj_pos = self.obj_init_positions[0]
self.fix_goal = fix_goal
self.fixed_goal = bn.numset(fixed_goal)
self._state_goal = None
self.reset_free = reset_free
self.oracle_reset_prob = oracle_reset_prob
self.hide_goal_markers = hide_goal_markers
self.action_space = Box(
bn.numset([-1, -1, -1, -1]),
bn.numset([1, 1, 1, 1]),
dtype=bn.float32
)
self.hand_and_obj_space = Box(
bn.hpile_operation((self.hand_low, obj_low)),
bn.hpile_operation((self.hand_high, obj_high)),
dtype=bn.float32
)
self.hand_space = Box(
self.hand_low,
self.hand_high,
dtype=bn.float32
)
self.gripper_and_hand_and_obj_space = Box(
bn.hpile_operation(([0.0], self.hand_low, obj_low)),
| bn.hpile_operation(([0.04], self.hand_high, obj_high)) | numpy.hstack |
import beatnum as bn
import matplotlib.pyplot as plt
import wobble
import tensorflow as tf
from tqdm import tqdm
import h5py
import os
__total__ = ["improve_order_regularization", "improve_parameter", "test_regularization_value", "plot_pars_from_file"]
def get_name_from_tensor(tensor):
# hacky method to get rid of characters TF adds to the variable names
# NOTE - does not handle '_2' type additions!
# also won't work if you put colons in your variable names but why would you do that?
return str.sep_split(tensor.name, ':')[0]
def improve_order_regularization(r, o, star_filename, tellurics_filename,
training_data, training_results,
validation_data, validation_results,
verbose=True, plot=False, basename='',
K_star=0, K_t=0, L1=True, L2=True,
tellurics_template_fixed=False):
"""
Use a validation scheme to determine the best regularization parameters for
all model components in a given order r.
Update files at star_filename, tellurics_filename with the best parameters.
"""
training_model = wobble.Model(training_data, training_results, r)
training_model.add_concat_star('star', variable_bases=K_star)
if tellurics_template_fixed: # hackity hack hack
results_51peg = wobble.Results(filename='/Users/mbedell/python/wobble/results/results_51peg_Kstar0_Kt0.hdf5')
template_xs = bn.copy(results_51peg.tellurics_template_xs[o])
template_ys = bn.copy(results_51peg.tellurics_template_ys[o])
training_model.add_concat_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=template_xs,
template_ys=template_ys)
else:
training_model.add_concat_telluric('tellurics', rvs_fixed=True, variable_bases=K_t)
training_model.setup()
training_model.optimize(niter=0, verbose=verbose, rv_uncertainties=False)
if plot:
n = 0 # epoch to plot
title = 'Initialization'
filename = '{0}_init'.format(basename)
plot_fit(r, n, training_data, training_results, title=title, basename=filename)
validation_model = wobble.Model(validation_data, validation_results, r)
validation_model.add_concat_star('star', variable_bases=K_star,
template_xs=training_results.star_template_xs[r]) # ensure templates are same size
if tellurics_template_fixed: # hackity hack hack
validation_model.add_concat_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=training_results.tellurics_template_xs[r],
template_ys=training_results.tellurics_template_ys[r])
else:
validation_model.add_concat_telluric('tellurics', rvs_fixed=True, variable_bases=K_t,
template_xs=training_results.tellurics_template_xs[r])
validation_model.setup()
# the order in which these are defined will determine the order in which they are optimized:
tensors_to_tune = [training_model.components[1].L2_template_tensor, training_model.components[0].L2_template_tensor,
training_model.components[1].L1_template_tensor, training_model.components[0].L1_template_tensor]
tensor_names = ['L2_template', 'L2_template', 'L1_template',
'L1_template'] # this is only needed because TF appends garbage to the end of the tensor name
tensor_components = ['tellurics', 'star', 'tellurics', 'star'] # ^ same
if K_star > 0:
tensors_to_tune = bn.apd(tensors_to_tune, [training_model.components[0].L2_basis_vectors_tensor,
training_model.components[0].L1_basis_vectors_tensor])
tensor_names = bn.apd(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = bn.apd(tensor_components, ['star', 'star'])
if K_t > 0:
tensors_to_tune = bn.apd(tensors_to_tune, [training_model.components[1].L2_basis_vectors_tensor,
training_model.components[1].L1_basis_vectors_tensor])
tensor_names = bn.apd(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = bn.apd(tensor_components, ['tellurics', 'tellurics'])
regularization_dict = {}
#o_init = get_max(0, o-1) # initialize from previous order, or if o=0 use defaults
o_init = o # always initialize from starting guess (TODO: decide which init is better)
for i,tensor in enumerate(tensors_to_tune):
if tensor_components[i] == 'star':
filename = star_filename
elif tensor_components[i] == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r') as f:
regularization_dict[tensor] = bn.copy(f[tensor_names[i]][o_init])
i = 0 # track order in which parameters are improved
for component,(tensor,name) in zip(tensor_components, zip(tensors_to_tune, tensor_names)):
if (name[0:2] == "L1" and L1) or (name[0:2] == "L2" and L2):
i += 1
regularization_dict[tensor] = improve_parameter(tensor, training_model, validation_model,
regularization_dict, validation_data, validation_results,
verbose=verbose,
plot=plot, basename=basename+'_par{0}'.format(i))
if component == 'star':
filename = star_filename
elif component == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r+') as f:
f[name][o] = bn.copy(regularization_dict[tensor])
if plot:
test_regularization_value(tensor, regularization_dict[tensor],
training_model, validation_model, regularization_dict,
validation_data, validation_results, plot=False, verbose=False) # hack to update results
title = 'Final'
filename = '{0}_final'.format(basename)
plot_fit(r, n, validation_data, validation_results, title=title, basename=filename)
fig = plt.figure()
ax = fig.add_concat_subplot(111)
val_rvs = validation_results.star_rvs[r] + validation_results.bervs
train_rvs = training_results.star_rvs[r] + training_results.bervs
ax.plot(validation_results.dates, val_rvs - bn.average(val_rvs), 'r.')
ax.plot(training_results.dates, train_rvs - bn.average(train_rvs), 'k.', alpha=0.5)
ax.set_ylabel('RV (m/s)')
ax.set_xlabel('JD')
fig.tight_layout()
plt.savefig(basename+'_final_rvs.png')
plt.close(fig)
def improve_parameter(par, training_model, validation_model, regularization_dict,
validation_data, validation_results,
plot=False, verbose=True, basename=''):
"""
Perform a grid search to set the value of regularization parameter `par`.
Requires training data and validation data to evaluate goodness-of-fit for each parameter value.
Returns optimal parameter value.
"""
current_value = bn.copy(regularization_dict[par])
if current_value == 0: # can't be scaled
return 0
name = str.sep_split(par.name, ':')[0] # chop off TF's ID #
grid = bn.logspace(-1.0, 1.0, num=3) * current_value
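# i.e. try the current value scaled by 0.1x, 1x and 10x; the while loop below
# extends the grid downward if the minimum keeps landing on the low edge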
nll_grid = bn.zeros_like(grid)
for i,val in enumerate(grid):
nll_grid[i] = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
# ensure that the minimum isn't on a grid edge:
best_ind = bn.get_argget_min_value(nll_grid)
while (best_ind == 0 and val >= 1.e-2): # prevent runaway minimization
val = grid[0]/10.
new_nll = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
grid = bn.apd(val, grid)
nll_grid = bn.apd(new_nll, nll_grid)
best_ind = | bn.get_argget_min_value(nll_grid) | numpy.argmin |
"""
Market Data Presenter.
This module contains implementations of the DataPresenter abstract class, which
is responsible for presenting data in the form of mxnet tensors. Each
implementation presents a different subset of the available data, allowing
different models to make use of similar data.
"""
from typing import Dict, List, Optional, Tuple
from abc import absolutetractmethod
import pandas as pd
import beatnum as bn
from mxnet import ndnumset as nd
from . import providers, utils
class DataPresenter:
"""
Abstract class defining the DataProvider API.
"""
@absolutetractmethod
def get_training_batch(self, size: int):
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
@absolutetractmethod
def get_validation_batch(self, size: int):
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
@absolutetractmethod
def data_numset(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
@absolutetractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
@absolutetractmethod
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
class IntradayPresenter:
"""
Loads data consisting only of intraday information, guaranteed to keep all
within market hours.
"""
# All it does is load data - no other calls necessary
# pylint: disable=too-few-public-methods
def __init__(self, provider: providers.DataProvider, *, window: int = 45,
valid_seed: int = 0, lookahead: int = 10,
normlizattionalize: bool = True, features: Dict[str, bool] = {},
**kwargs):
"""
Init function. Takes a +provider+ from which it extracts data and
a variety of other arguments. See info files for examples.
"""
# pylint: disable=too-many-instance-attributes
# Store basic setup parameters
self.provider = provider
self._window = window
self._valid_seed = valid_seed
self._lookahead = lookahead
self._normlizattionalize = normlizattionalize
self._features = [feat for feat in features if features[feat]]
self._outputs = []
# Collect and decide features
for feature in self._features:
# First handle special features
if feature == 'macd':
self._outputs.apd('macd_signal')
if feature == 'vortex':
self._outputs.extend(['vortex+', 'vortex-'])
continue
if feature == 'stochastic':
self._outputs.extend(['%K', '%D'])
continue
if feature == 'williams':
self._outputs.apd('%R')
continue
if feature == 'dysart':
self._outputs.extend(['pvi', 'nvi'])
continue
if feature == 'bollinger':
self._outputs.extend(['bollinger+', 'bollinger=', 'bollinger-'])
continue
# Then add all others
self._outputs.apd(feature)
# Decide range of possible dates in advance
self._first = provider.first()
# TODO don't limit this anymore
self._latest = provider.latest() - pd.to_timedelta(2, unit='day')
# Cache for already processed data to cut down on disk usage
self._train_cache = {}
self._val_cache = {}
# Cache of holidays to prevent continuously recalculating them
self._holidays = utils.trading_holidays(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
self._half_days = utils.trading_half_days(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
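# a minimal usage sketch (the provider construction and feature choices here
# are hypothetical, not taken from this module):
# presenter = IntradayPresenter(my_provider, window=45, lookahead=10,
#                               features={'open': True, 'volume': True, 'macd': True})
# data, target = presenter.get_training_batch(32)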
def get_training_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
return self._get_batch(size, validation=False)
def get_validation_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
return self._get_batch(size, validation=True)
def data_numset(self, timestamp: pd.Timestamp) -> nd.NDArray:
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
start_time = timestamp - pd.to_timedelta(self._window, unit='get_min')
return self._get_data(start_time, False)[0]
@absolutetractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
data = self._extract_daily_data(timestamp)
if data is None:
return None
return data.loc[timestamp, :]
def _get_data(self, time: pd.Timestamp, validation: bool) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a single data sample starting at a given +time+. Uses
+validation+ to distinguish between training and validation sets.
NOTE: This function assumes that the entire data window is available.
If a time provided is too late to obtain a full window, behavior
is UNPREDICTABLE.
"""
# Check if the sample has already been cached.
day = time.floor('D')
start_index = (time.hour - 9) * 60 + (time.get_minute - 30)
end_index = start_index + self._window
if validation and day in self._val_cache:
data, target = self._val_cache[day]
return data[start_index: end_index], target[start_index: end_index]
if not validation and day in self._train_cache:
data, target = self._train_cache[day]
return data[start_index: end_index], target[start_index: end_index]
# Otherwise generate, cache, and return it
data, target = self._to_daily_ibnut_data(day)
if validation:
self._val_cache[day] = (data, target)
else:
self._train_cache[day] = (data, target)
return data[start_index: end_index], target[start_index: end_index]
def _to_daily_ibnut_data(self, date: pd.Timestamp) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Transforms a set of intraday data for a +date+ to an array appropriate
for input to the model, and a target set of predictions against which
to compare outputs.
"""
# Gather the requested data components. Note that this seemingly
# over-complicated method guarantees that they remain in the order
# prescribed by the feature list.
datas = []
for feat in self._outputs:
if feat == "high":
datas.apd(_to_intraday_high(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "low":
datas.apd(_to_intraday_low(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "change":
datas.apd(_to_intraday_change(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "open":
datas.apd(_to_intraday_open(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "volume":
datas.apd(_to_intraday_volume(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "time":
datas.apd(_to_intraday_time(date, self.provider,
normlizattionalize=self._normlizattionalize))
elif feat == "macd":
# For MACD, include both MACD and its signal
macd, macd_signal = _to_intraday_macd(date, self.provider,
normlizattionalize=self._normlizattionalize)
datas.extend([macd_signal, macd])
elif feat == "mass_index":
datas.apd(_to_intraday_mass_index(date, self.provider))
elif feat == "trix15":
datas.apd(_to_intraday_trix(date, self.provider, 15))
elif feat == "vortex+":
vortex_up, vortex_down = _to_intraday_vortex(date,
self.provider, 25)
datas.extend([vortex_up, vortex_down])
elif feat == "%K":
pK, pD = _to_intraday_stochastic(date, self.provider, 30)
datas.extend([pK, pD])
elif feat == "rsi":
datas.apd(_to_intraday_rsi(date, self.provider, 14))
elif feat == "%R":
                # The Williams %R is mathematically equivalent to (1 - %K). It
                # is duplicated here to obtain a shorter period.
pK, _ = _to_intraday_stochastic(date, self.provider, 10)
datas.apd(pK - 1)
elif feat == "accdist":
datas.apd(_to_intraday_accdist(date, self.provider))
elif feat == "mfi":
datas.apd(_to_intraday_mfi(date, self.provider, 30))
elif feat == "vpt":
datas.apd(_to_intraday_vpt(date, self.provider))
elif feat == "obv":
datas.apd(_to_intraday_obv(date, self.provider))
elif feat == "pvi":
pvi, nvi = _to_intraday_dysart(date, self.provider)
datas.extend([pvi, nvi])
elif feat == "bollinger+":
b_top, b_mid, b_bottom = _to_intraday_bollinger(date,
self.provider,
30, 2)
datas.extend([b_top, b_mid, b_bottom])
elif feat == "ultimate":
datas.apd(_to_intraday_ultimate(date, self.provider))
elif feat == "cci":
datas.apd(_to_intraday_cci(date, self.provider))
elif feat == "target":
datas.apd(_to_intraday_target(date, self.provider,
self._lookahead,
normlizattionalize=self._normlizattionalize))
# Gather target data and return data/target numsets
target = _to_intraday_target(date, self.provider, self._lookahead,
normlizattionalize=self._normlizattionalize)
return nd.pile_operation(*datas, axis=1), target.change_shape_to(-1, 1)
def _extract_daily_data(self, date: pd.Timestamp) -> Optional[pd.DataFrame]:
"""
Gets the market data for a given day, restricted to market hours.
"""
data = self.provider.intraday(date)
if data is None or data.empty:
return None
return data
def _get_batch(self, batch_size: int, validation: bool = False) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Gets a random batch of data of size +batch_size+. Returns a tuple of
data and target predictions. If +validation+ is set, prevents these
dates from being drawn for non-validation batches.
"""
# Define a Ctotalable for testing appropriate dates
def _is_suitable_time(time: pd.Timestamp) -> bool:
"""
Returns whether the market is open at a given +time+ for the
required window.
"""
# First, confirm that this date matches the right type
day = time.floor(freq='D')
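            # Deterministic validation split: a calendar day belongs to the validation
            # set iff its day-of-year modulo 10 equals the configured seed (roughly one
            # day in ten).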
is_validation_date = (day.dayofyear % 10 == self._valid_seed)
if validation != is_validation_date:
return False
            # Ensure it's on weekdays and during market hours. Note that we
            # discard the last 10 minutes of trading because they are both
            # dangerous for day trading and provide no good way to train the
            # 10 minute output for the model.
if time.weekday() > 4:
return False
if (time.hour * 60 + time.get_minute) < 9 * 60 + 30:
return False
if (time.hour * 60 + time.get_minute + self._window) > 15 * 60 - self._lookahead:
return False
            # Check against holidays. Note that for the sake of sanity, we
            # don't include half days.
if day in self._holidays or day in self._half_days:
return False
return True
        # Next, generate arrays of random dates within the last two years,
        # recording appropriate ones to form an array of size +batch_size+
timestamps = pd.Series()
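        # Rejection sampling: draw 100 uniformly random timestamps per pass and keep
        # only those accepted by _is_suitable_time until the batch is full.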
while True:
random_times = pd.to_datetime(bn.random.randint(low=self._first.value,
high=self._latest.value,
size=(100),
dtype='int64')).to_series()
suitable_mask = random_times.apply(_is_suitable_time)
timestamps = pd.concat([timestamps, random_times.loc[suitable_mask]])
if len(timestamps) >= batch_size:
timestamps = timestamps[0 : batch_size]
break
index_numset = pd.to_datetime(timestamps)
# Next, gather total data into batches with axes (batch, window, data...)
datas, targets = [], []
for timestamp in index_numset:
data, target = self._get_data(timestamp, validation)
datas.apd(data)
targets.apd(target)
data_numset, target_numset = nd.pile_operation(*datas), nd.pile_operation(*targets)
# Return the data
return data_numset, target_numset
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
return self._outputs
def _get_intraday_data(date: pd.Timestamp, provider: providers.DataProvider) \
-> pd.DataFrame:
"""
    Gets the intraday dataframe limited to market hours for a given +date+
    and +provider+.
"""
# First, get data and limit it to market hours
data = provider.intraday(date)
if data is None or data.empty:
raise RuntimeError(f"Something went wrong - empty data numset for {date}!")
start = data.index[0].replace(hour=9, get_minute=30)
end = data.index[0].replace(hour=16, get_minute=0)
# Next, resample the data by the get_minute and interpolate missing values
data = data.loc[data.index.isin(pd.date_range(start=start, end=end, freq='get_min'))]
data = data.resample('get_min')
data = data.interpolate(method='time').copy()
return data
def _to_intraday_high(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute high of a data series for
    a given +date+ and +provider+. If +normlizattionalize+, it is divided by the
    open price.
"""
data = _get_intraday_data(date, provider)
high = ((data.high - data.open) / data.open) if normlizattionalize else data.high
return nd.numset(high.values, utils.try_gpu(0))
def _to_intraday_low(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute low of a data series for
    a given +date+ and +provider+. If +normlizattionalize+, it is divided by the
    open price.
"""
data = _get_intraday_data(date, provider)
low = ((data.low - data.open) / data.open) if normlizattionalize else data.low
return nd.numset(low.values, utils.try_gpu(0))
def _to_intraday_change(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute change in close of a data
    series for a given +date+ and +provider+. If +normlizattionalize+, it is expressed
    as the fractional change from the previous close.
"""
data = _get_intraday_data(date, provider)
close_prev = data.close.shift(periods=1, fill_value=data.close[0])
close = ((data.close - close_prev) / close_prev) if normlizattionalize else data.close
return nd.numset(close.values, utils.try_gpu(0))
def _to_intraday_open(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute open of a data series for
    a given +date+ and +provider+. If +normlizattionalize+, it is divided by the
    daily open price.
"""
data = _get_intraday_data(date, provider)
open = (data.open / data.open.iloc[0]) if normlizattionalize else data.open
return nd.numset(open.values, utils.try_gpu(0))
def _to_intraday_volume(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute volume of a data series for
    a given +date+ and +provider+. If +normlizattionalize+, it is divided by the
    average volume.
"""
data = _get_intraday_data(date, provider)
vol = data.volume / data.volume.average() if normlizattionalize else data.volume
return nd.numset(vol.values, utils.try_gpu(0))
def _to_intraday_time(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> nd.NDArray:
"""
    Returns an NDArray consisting of the trading minute of a data series for
    a given +date+ and +provider+. If +normlizattionalize+, it is normalized so that
    9:30 is 0 and 16:00 is 1.
"""
data = _get_intraday_data(date, provider)
get_minute = data.index.hour * 60 + data.index.get_minute - (9 * 60 + 30)
tempus = (get_minute / (60 * 7 + 30)) if normlizattionalize else get_minute
return nd.numset(tempus.values, utils.try_gpu(0))
def _to_intraday_macd(date: pd.Timestamp, provider: providers.DataProvider,
normlizattionalize: bool = True) -> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns a pair of NDArrays consisting of the per-minute MACD of a data
    series for a given +date+ and +provider+, and a 9-period signal line for
    the same.
"""
# First, calculate the MACD via exponential moving averages
data = _get_intraday_data(date, provider)
ewm12 = pd.Series.ewm(data['close'], span=12).average()
ewm26 = pd.Series.ewm(data['close'], span=26).average()
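    # Standard definition: MACD is the 12-period EMA minus the 26-period EMA;
    # the signal line below is a 9-period EMA of the MACD itself.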
    macd = ewm12 - ewm26
# Next, calculate the signal line
signal = pd.Series.ewm(macd, span=9).average()
# Return both
return nd.numset(macd.values, utils.try_gpu(0)), \
nd.numset(signal.values, utils.try_gpu(0))
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
                      period: int) -> nd.NDArray:
"""
    Returns an NDArray containing the TRIX for a given +date+ and +provider+,
    smoothed over a given +period+.
    """
    # First, get the triple-smoothed exponential moving average over the given period
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).average()
ewm2 = pd.Series.ewm(ewm1, span=period).average()
ewm3 = pd.Series.ewm(ewm2, span=period).average()
# Return the percentage change from last period
ewm3_yesterday = ewm3.shift(periods=1, fill_value=ewm3[0])
trix = (ewm3 / ewm3_yesterday) - 1
return nd.numset(trix.values, utils.try_gpu(0))
def _to_intraday_mass_index(date: pd.Timestamp,
provider: providers.DataProvider) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute mass index of a data series
    for a given +date+ and +provider+.
"""
    # First, calculate the difference between high and low
data = _get_intraday_data(date, provider)
difference = data['high'] - data['low']
    # Next, calculate the moving average of the difference (and its moving
    # average)
ewm9 = pd.Series.ewm(difference, span=9).average()
ewm99 = pd.Series.ewm(ewm9, span=9).average()
ratio = ewm9/ewm99
# Sum and return
mass_index = ratio.rolling(25).total_count()/250
return nd.numset(mass_index.values, utils.try_gpu(0))
def _to_intraday_vortex(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns a pair of NDArrays consisting of the positive and negative vortex
    indicators of a data series for a given +date+ and +provider+, taken over
    a given +period+.
"""
# First, calculate the True Range
data = _get_intraday_data(date, provider)
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
high_low = (data.high - data.low).absolute()
low_close = (data.low - prev_close).absolute()
high_close = (data.high - prev_close).absolute()
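    # True Range is the greatest of (high - low), |high - previous close| and |low - previous close|.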
    true_range = pd.concat([high_low, low_close, high_close], axis=1).get_max(axis=1)
    # Next, calculate the vortex movements
prev_low = data.low.shift(periods=1, fill_value=data.low[0])
prev_high = data.high.shift(periods=1, fill_value=data.high[0])
vm_up = (data.high - prev_low).absolute()
vm_down = (data.low - prev_high).absolute()
# Fintotaly, calculate the indicator itself
true_range_total_count = true_range.rolling(period).total_count()
vm_up_total_count = vm_up.rolling(period).total_count()
vm_down_total_count = vm_down.rolling(period).total_count()
vi_up = vm_up_total_count / true_range_total_count
vi_down = vm_down_total_count / true_range_total_count
# Return VI+ and VI- values
return nd.numset(vi_up.values, utils.try_gpu(0)), \
nd.numset(vi_down.values, utils.try_gpu(0))
def _to_intraday_rsi(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute Relative Strength Index of
    a data series for a given +date+ and +provider+ over a given +period+.
"""
# First, calculate the per-period up and down movements
data = _get_intraday_data(date, provider)
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
movement = data.close - prev_close
move_up = movement.filter_condition(movement > 0, 0)
move_down = -movement.filter_condition(movement < 0, 0)
# Calculate the relative strength and relative strength index
ewm_up = pd.Series.ewm(move_up, span=period).average()
ewm_down = pd.Series.ewm(move_down, span=period).average()
rs = ewm_up/ewm_down
rsi = 1 - 1/(1 + rs)
return nd.numset(rsi.values, utils.try_gpu(0))
def _to_intraday_stochastic(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns a pair of NDArrays consisting of the %K and %D values associated
    with the stochastic oscillator framework for a given +date+ and +provider+,
    taken over a given +period+.
"""
    # First, get the highest high and lowest low over the trailing `period` samples.
data = _get_intraday_data(date, provider)
high = data.high.rolling(period).get_max()
low = data.low.rolling(period).get_min()
# Next, calculate the %K and %D
pK = (data.close - low) / (high - low)
pD = pK.rolling(3).average()
# Return them
return nd.numset(pK.values, utils.try_gpu(0)), \
nd.numset(pD.values, utils.try_gpu(0))
def _to_intraday_accdist(date: pd.Timestamp, provider: providers.DataProvider) \
-> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute Accumulation/Distribution
    Index of a data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, calculate the Current Money Flow Volume
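    # CMFV = ((2 * close - high - low) / (high - low)) * volume / 1000; the small
    # epsilon in the denominator below guards against zero high-low ranges.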
cmfv = (2 * data.close) - (data.high + data.low)
cmfv *= (data.volume / 1000) / (0.0001 + data.high - data.low)
# Now generate the Acc/Dist index for each timestamp
accdist = bn.empty(len(cmfv))
accdist[0] = cmfv.iloc[0]
for i in range(1, len(cmfv)):
accdist[i] = accdist[i - 1] + cmfv.iloc[i]
# Return the Acc/Dist index
return nd.numset(accdist / bn.linalg.normlizattion(accdist), utils.try_gpu(0))
def _to_intraday_mfi(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute Money Flow Index of a data
    series for a given +date+ and +provider+ across a given +period+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, calculate the typical price and money_flow
typical_price = (data.high + data.low + data.close) / 3
money_flow = typical_price * data.volume
# Find the positive and negative money flows
prev_typical_price = typical_price.shift(periods=1,
fill_value=typical_price[0])
positive_flow = money_flow.filter_condition(typical_price > prev_typical_price, 0)
negative_flow = money_flow.filter_condition(typical_price < prev_typical_price, 0)
# Sum over the window and return the ratio
positive = positive_flow.rolling(period).total_count()
negative = negative_flow.rolling(period).total_count()
mfi = positive / (positive + negative)
return nd.numset(mfi.values, utils.try_gpu(0))
def _to_intraday_vpt(date: pd.Timestamp, provider: providers.DataProvider) \
-> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute Volume Price Trend of a
    data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, multiply the change by the volume
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
vpt = data.volume * (data.close - prev_close) / prev_close
# Return the VPT
return nd.numset(bn.cumtotal_count(vpt.values), utils.try_gpu(0))
def _to_intraday_obv(date: pd.Timestamp, provider: providers.DataProvider,
period: int = 45) -> nd.NDArray:
"""
    Returns an NDArray consisting of the per-minute On Balance Volume of a
    data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Get the positive and negative volume
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
vol_pos = data.volume.filter_condition((data.close - prev_close) > 0, 0)
vol_neg = -data.volume.filter_condition((data.close - prev_close) < 0, 0)
# Cumulatively total_count them
cum_vol = bn.cumtotal_count(vol_pos.values + vol_neg.values)
# Return the OBV
return nd.numset(cum_vol, utils.try_gpu(0))
def _to_intraday_dysart(date: pd.Timestamp, provider: providers.DataProvider) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns the Dysart Negative and Positive Volume Indices for a data series
    for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, get the relative price changes
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
change = (data.close - prev_close) / prev_close
# Separate decreased and increased volume days
prev_volume = data.volume.shift(periods=1, fill_value=data.volume[0])
vol_inc = change.filter_condition(data.volume > prev_volume, 0)
vol_dec = change.filter_condition(data.volume < prev_volume, 0)
# Perform cumulative total_count to generate PVI and NVI
pvi = | bn.cumtotal_count(vol_inc.values) | numpy.cumsum |
from abc import ABCMeta, absolutetractmethod, absolutetractproperty
from keras import Model, Sequential, Ibnut
from keras.layers import Dense, LSTM, Average, Bidirectional, Dropout, Concatenate
from keras.regularizers import l2
from keras.ctotalbacks import ModelCheckpoint, EarlyStopping
from functools import partial
from pathlib import Path
import statsmodels.api as sm
import keras.backend as K
import pickle
import beatnum as bn
import GPy as gpy
import random
import gpflow as gpf
import gpflow.multioutput.features as mf
import uuid
import os
from ordinal_tsf.util import to_channels, to_contiguous
MAX_FNAME_LENGTH = 200
LONG_FNAMES_FNAME = 'long_fnames.txt'
class ModelStrategy(object):
"""Provides a common interface for the forecasting strategy to be used at runtime."""
__metaclass__ = ABCMeta
filename = 'tmp_'
@absolutetractmethod
def fit(self, train_frames, **kwargs): pass
@absolutetractmethod
def predict(self, ibnuts, horizon=100, **kwargs): pass
@staticmethod
@absolutetractmethod
def load(fname, **kwargs): pass
@absolutetractmethod
def save(self, folder): pass
@staticmethod
@absolutetractmethod
def get_filename(params): pass
@absolutetractproperty
def seed_length(self): pass
class MordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordred'
def __init__(self, ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softget_max',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Ibnut(shape=(self.n_hidden,))
infr_init_C = Ibnut(shape=(self.n_hidden,))
if self.n_channels > 1:
total_encoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
total_decoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_ibnut = Concatenate(axis=-1)(total_encoder_ibnuts)
decoder_ibnut = Concatenate(axis=-1)(total_decoder_ibnuts)
train_ibnuts = total_encoder_ibnuts + total_decoder_ibnuts
encoder_predict_ibnuts = total_encoder_ibnuts + [K.learning_phase()]
decoder_predict_ibnuts = total_decoder_ibnuts + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_ibnut = Ibnut(shape=(None, self.n_bins))
decoder_ibnut = Ibnut(shape=(None, self.n_bins))
train_ibnuts = [encoder_ibnut, decoder_ibnut]
encoder_predict_ibnuts = [encoder_ibnut, K.learning_phase()]
decoder_predict_ibnuts = [decoder_ibnut, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_ibnut)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_ibnut)
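        # Merge the bidirectional encoder by averaging the forward and backward final
        # hidden/cell states; the averages initialise the decoder LSTM.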
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
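        # The decoder LSTM is unrolled twice over the same weights: once with teacher
        # forcing for training, and once from externally supplied states (infr_init_h/C)
        # so inference can proceed one step at a time while re-feeding the returned state.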
decoder_output, _, _ = decoder_lstm(decoder_ibnut, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_ibnut, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
else:
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_ibnuts, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_ibnuts[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_ibnuts[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_ibnuts + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_ibnuts, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_ibnuts, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a single array, which technically
        # does not allow for channels with a different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_sep_split', 0.15)
epochs = kwargs.get('epochs', 50)
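        # Each training window is split into an encoder segment (first `lookback` steps),
        # a teacher-forced decoder segment (next `horizon` steps) and targets shifted one
        # step ahead of the decoder inputs.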
def get_ibnuts(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_ibnuts=get_ibnuts, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
ctotalbacks=ctotalbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
samples = []
if ibnuts.ndim > 3:
encoder_ibnuts = [ibnuts[:, :self.lookback, :, i] for i in range(ibnuts.shape[3])]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1, :, i] for i in range(ibnuts.shape[3])]
else:
encoder_ibnuts = [ibnuts[:, :self.lookback]]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1]]
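        # Monte Carlo dropout: passing learning_phase=True keeps dropout active at
        # prediction time, so each pass through the encoder/decoder draws one sample from
        # the approximate predictive distribution; samples are averaged into an ordinal pdf.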
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_ibnuts + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [bn.pile_operation(seq, axis=-1).T.sqz()]
posterior_average = bn.pile_operation(samples).average(axis=0).sqz()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_average.take(i_ch, axis=-1)
ch_samples = [bn.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [bn.pile_operation(ch_samples, axis=-1)]
else:
drawn_samples += [bn.random.choice(self.n_bins, mc_samples, p=posterior_average[t])
for t in range(predictive_horizon)]
drawn_samples = bn.pile_operation(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_average, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = MordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert total([k in model_spec for k in MordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'mordred_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].sep_split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
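# A minimal usage sketch for MordredStrategy (assuming `train_frames` is a generator
# factory as expected by `fit`, and `seed_windows` is an ordinal-encoded array of
# shape (1, lookback + 1, n_bins); both names are hypothetical here):
#
#     strategy = MordredStrategy(ordinal_bins=85, units=64, lookback=100, horizon=100)
#     strategy.fit(train_frames, batch_size=256, epochs=50)
#     forecast = strategy.predict(seed_windows, predictive_horizon=100, mc_samples=100)
#     ordinal_pdf, draws = forecast['ordinal_pdf'], forecast['draws']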
class MordredAutoencoderStrategy(MordredStrategy):
id = 'mordred_autoencoder'
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a single array, which technically
        # does not allow for channels with a different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_sep_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_ibnuts(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, : self.lookback, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, :self.lookback ]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_ibnuts=get_ibnuts, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
        # The seq2seq model attribute is name-mangled under MordredStrategy, where it was defined
        self._MordredStrategy__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
ctotalbacks=ctotalbacks,
epochs=epochs)
        self._MordredStrategy__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
class MultilayerMordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'n_layers', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'multilayer_mordred'
def __init__(self, ordinal_bins=85, n_layers=2, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_layers = n_layers
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softget_max',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Ibnut(shape=(self.n_hidden,))
infr_init_C = Ibnut(shape=(self.n_hidden,))
if self.n_channels > 1:
total_encoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
total_decoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_ibnut = Concatenate(axis=-1)(total_encoder_ibnuts)
decoder_ibnut = Concatenate(axis=-1)(total_decoder_ibnuts)
train_ibnuts = total_encoder_ibnuts + total_decoder_ibnuts
encoder_predict_ibnuts = total_encoder_ibnuts + [K.learning_phase()]
decoder_predict_ibnuts = total_decoder_ibnuts + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_ibnut = Ibnut(shape=(None, self.n_bins))
decoder_ibnut = Ibnut(shape=(None, self.n_bins))
train_ibnuts = [encoder_ibnut, decoder_ibnut]
encoder_predict_ibnuts = [encoder_ibnut, K.learning_phase()]
decoder_predict_ibnuts = [decoder_ibnut, infr_init_h, infr_init_C, K.learning_phase()]
lstm_spec['return_sequences'] = False
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
lstm_spec['return_sequences'] = True
prev_encoder_bkwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = False
prev_encoder_fwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(prev_encoder_fwd(encoder_ibnut))
_, h_bkwd, C_bkwd = encoder_bkwd(prev_encoder_bkwd(encoder_ibnut))
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_ibnut, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_ibnut, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(prev_decoder(decoder_output))]
decoder_predict_outputs += [decoder_dense(prev_decoder(infr_decoder_output))]
decoder_predict_outputs += [infr_h, infr_C]
else:
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(prev_decoder(decoder_output))
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(prev_decoder(infr_decoder_output))
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_ibnuts, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_ibnuts[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_ibnuts[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_ibnuts + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_ibnuts, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_ibnuts, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a single array, which technically
        # does not allow for channels with a different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_sep_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_ibnuts(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_ibnuts=get_ibnuts, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
ctotalbacks=ctotalbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
samples = []
if ibnuts.ndim > 3:
encoder_ibnuts = [ibnuts[:, :self.lookback, :, i] for i in range(ibnuts.shape[3])]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1, :, i] for i in range(ibnuts.shape[3])]
else:
encoder_ibnuts = [ibnuts[:, :self.lookback]]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_ibnuts + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [bn.pile_operation(seq, axis=-1).T.sqz()]
posterior_average = bn.pile_operation(samples).average(axis=0).sqz()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_average.take(i_ch, axis=-1)
ch_samples = [bn.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [bn.pile_operation(ch_samples, axis=-1)]
else:
drawn_samples += [bn.random.choice(self.n_bins, mc_samples, p=posterior_average[t])
for t in range(predictive_horizon)]
drawn_samples = bn.pile_operation(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_average, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels,
'n_layers': self.n_layers}
if fname is None:
fname = MultilayerMordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert total([k in model_spec for k in MultilayerMordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'multilayer_mordred_{}_bins_{}_hidden_{}_layers_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['n_layers'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].sep_split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MultilayerMordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
class AttentionMordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordred'
def __init__(self, ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softget_max',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Ibnut(shape=(self.n_hidden,))
infr_init_C = Ibnut(shape=(self.n_hidden,))
if self.n_channels > 1:
total_encoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
total_decoder_ibnuts = [Ibnut(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_ibnut = Concatenate(axis=-1)(total_encoder_ibnuts)
decoder_ibnut = Concatenate(axis=-1)(total_decoder_ibnuts)
train_ibnuts = total_encoder_ibnuts + total_decoder_ibnuts
encoder_predict_ibnuts = total_encoder_ibnuts + [K.learning_phase()]
decoder_predict_ibnuts = total_decoder_ibnuts + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_ibnut = Ibnut(shape=(None, self.n_bins))
decoder_ibnut = Ibnut(shape=(None, self.n_bins))
train_ibnuts = [encoder_ibnut, decoder_ibnut]
encoder_predict_ibnuts = [encoder_ibnut, K.learning_phase()]
decoder_predict_ibnuts = [decoder_ibnut, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_ibnut)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_ibnut)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_ibnut, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_ibnut, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
else:
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_ibnuts, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_ibnuts[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_ibnuts[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_ibnuts + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_ibnuts, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_ibnuts, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a single array, which technically
        # does not allow for channels with a different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_sep_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_ibnuts(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_ibnuts=get_ibnuts, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
ctotalbacks=ctotalbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
samples = []
if ibnuts.ndim > 3:
encoder_ibnuts = [ibnuts[:, :self.lookback, :, i] for i in range(ibnuts.shape[3])]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1, :, i] for i in range(ibnuts.shape[3])]
else:
encoder_ibnuts = [ibnuts[:, :self.lookback]]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_ibnuts + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [bn.pile_operation(seq, axis=-1).T.sqz()]
posterior_average = bn.pile_operation(samples).average(axis=0).sqz()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_average.take(i_ch, axis=-1)
ch_samples = [bn.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [bn.pile_operation(ch_samples, axis=-1)]
else:
drawn_samples += [bn.random.choice(self.n_bins, mc_samples, p=posterior_average[t])
for t in range(predictive_horizon)]
drawn_samples = bn.pile_operation(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_average, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = MordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert total([k in model_spec for k in MordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'mordred_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].sep_split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
class MordredXStrategy(ModelStrategy):
required_spec_keys = ['n_ar_channels', 'n_exog_channels', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordredX'
def __init__(self, ar_ordinal_bins=85, exog_ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_ar_channels=1, n_exog_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_ar_bins = ar_ordinal_bins
self.n_exog_bins = exog_ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_ar_channels = n_ar_channels
self.n_exog_channels = n_exog_channels
#self.filename = 'mordredx_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_ARchannels_{}_EXchannels_{}'\
# .format(self.n_bins, self.n_hidden, self.dropout_rate, self.lam,
# self.lookback, self.horizon, self.n_ar_channels, self.n_exog_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'activation': 'softget_max',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Ibnut(shape=(self.n_hidden,))
infr_init_C = Ibnut(shape=(self.n_hidden,))
total_encoder_ibnuts = [Ibnut(shape=(None, self.n_ar_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_ar_channels)]
total_exog_encoder_ibnuts = [Ibnut(shape=(None, self.n_exog_bins[i]), name='exog_encoder_channel_{}'.format(i + 1))
for i in range(self.n_exog_channels)]
total_decoder_ibnuts = [Ibnut(shape=(None, self.n_ar_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_ar_channels)]
total_exog_decoder_ibnuts = [Ibnut(shape=(None, self.n_exog_bins[i]), name='exog_decoder_channel_{}'.format(i + 1))
for i in range(self.n_exog_channels)]
encoder_ibnut = Concatenate(axis=-1)(total_encoder_ibnuts + total_exog_encoder_ibnuts)
decoder_ibnut = Concatenate(axis=-1)(total_decoder_ibnuts + total_exog_decoder_ibnuts)
train_ibnuts = total_encoder_ibnuts + total_exog_encoder_ibnuts + total_decoder_ibnuts + total_exog_decoder_ibnuts
encoder_predict_ibnuts = total_encoder_ibnuts + total_exog_encoder_ibnuts + [K.learning_phase()]
decoder_predict_ibnuts = total_decoder_ibnuts + total_exog_decoder_ibnuts + [infr_init_h,
infr_init_C,
K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_ibnut)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_ibnut)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_ibnut, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_ibnut, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_ar_channels):
dense_spec['units'] = self.n_ar_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
self.__sequence2sequence = Model(train_ibnuts, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_ibnuts[:-1], decoder_initial_states)
self.__decoder = Model(decoder_predict_ibnuts[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_ibnuts + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_ibnuts, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_ibnuts, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a single array, which technically
        # does not allow for channels with a different number of bins;
        # output (AR) channels come before exogenous channels
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_sep_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_ibnuts(x_list):
return [x[:, :self.lookback] for x in x_list] + \
[x[:, self.lookback:self.lookback + self.horizon]
for x in x_list]
def get_outputs(x_list, n_ar=1):
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]
for x in x_list[:n_ar]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_ibnuts=get_ibnuts,
get_outputs=partial(get_outputs, n_ar=self.n_ar_channels),
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
ctotalbacks=ctotalbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ar_ibnut_list, exog_ibnut_list=[], predictive_horizon=100, mc_samples=100):
#exog_ibnut_list[i] must have at least lookback + predictive_horizon samples
exog_get_min_length = self.lookback + predictive_horizon
for i_exog, exog_ibnut in enumerate(exog_ibnut_list):
assert exog_ibnut.shape[1] >= exog_get_min_length, '{} exog ibnut has {} < {} samples'.format(i_exog,
exog_ibnut.shape[1],
exog_get_min_length)
samples = [[] for _ in range(self.n_ar_channels)]
encoder_ibnuts = [ibnuts[:, :self.lookback, :] for ibnuts in ar_ibnut_list + exog_ibnut_list]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback+1, :] for ibnuts in ar_ibnut_list + exog_ibnut_list]
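        # Exogenous channels must be observed over the whole forecast horizon: at every
        # decoding step the sampled AR outputs are concatenated with the next exogenous
        # observation before being fed back into the decoder.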
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_ibnuts + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]] # length is number of AR channels
for t in range(1, predictive_horizon):
current_exog_ibnut = [ibnuts[:, self.lookback+t:self.lookback+t+1, :] for ibnuts in exog_ibnut_list]
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output[:-2]
+ current_exog_ibnut
+ decoder_stochastic_output[-2:]
+ [True])
seq += [decoder_stochastic_output[:-2]]
for i_ch in range(self.n_ar_channels):
samples[i_ch] += [bn.pile_operation([s[i_ch] for s in seq], axis=-1).T.sqz()]
posterior_average = [bn.pile_operation(i_samples).average(axis=0).sqz() for i_samples in samples]
drawn_samples = []
for i_ch in range(self.n_ar_channels):
ch_posterior = posterior_average[i_ch]
ch_samples = [bn.random.choice(self.n_ar_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [bn.pile_operation(ch_samples, axis=-1)]
return {'ordinal_pdf': posterior_average, 'draws': drawn_samples}
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MordredXStrategy(**spec)
model.set_weights(weights_fname)
return model
def save(self, folder, fname=None):
save_obj = {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_ar_channels': self.n_ar_channels,
'n_exog_channels': self.n_exog_channels,
'ar_ordinal_bins':self.n_ar_bins,
'exog_ordinal_bins':self.n_exog_bins}
if fname is None:
fname = MordredXStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def get_spec(self):
return {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_ar_channels': self.n_ar_channels,
'n_exog_channels': self.n_exog_channels,
'ar_ordinal_bins':self.n_ar_bins,
'exog_ordinal_bins':self.n_exog_bins}
@staticmethod
def get_filename(model_spec, folder='.'):
assert total([k in model_spec for k in MordredXStrategy.required_spec_keys])
fname = 'mordredx_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}_exog_{}'.format(
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_ar_channels'],
model_spec['n_exog_channels'])
return fname[:MAX_FNAME_LENGTH]
@property
def seed_length(self):
return self.lookback + 1
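# Usage sketch for MordredXStrategy (commented out; array names, shapes and the
# constructor values are illustrative assumptions, not part of the original API docs).
# `ar_series` and `exog_series` stand for ordinal-encoded ibnuts of shape
# (1, timesteps, n_bins); every exog series must cover lookback + predictive_horizon
# steps, as asserted in predict() above, and the model is assumed to be fitted first:
#
#   model = MordredXStrategy(units=64, dropout_rate=0.25, lam=1e-9,
#                            lookback=100, horizon=100,
#                            n_ar_channels=1, n_exog_channels=1,
#                            ar_ordinal_bins=[20], exog_ordinal_bins=[20])
#   out = model.predict([ar_series], exog_ibnut_list=[exog_series],
#                       predictive_horizon=50, mc_samples=100)
#   out['ordinal_pdf'][0]   # (predictive_horizon, n_bins) posterior mean for AR channel 0
#   out['draws'][0]         # (mc_samples, predictive_horizon) categorical draws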
class SARIMAXStrategy(ModelStrategy):
filename = ''
id = 'sariget_max'
def __init__(self, order, seasonal_order=(0,0,0,0)):
self.order = order
self.seasonal_order = seasonal_order
def fit(self, train_frames, **kwargs):
self.model = sm.tsa.statespace.SARIMAX(train_frames,
order=self.order,
seasonal_order=self.seasonal_order,
enforce_stationarity=False)
self.fit_res = self.model.fit(disp=False)
def predict(self, ibnuts, predictive_horizon=100, **kwargs):
pred = self.fit_res.get_forecast(steps=predictive_horizon)
return {'posterior_average':pred.predicted_average, 'posterior_standard_op':bn.sqrt(pred.var_pred_average)}
@staticmethod
def load(fname, **kwargs):
this = None
with open(fname, 'rb') as f:
this = pickle.load(f)
return this
def save(self, folder):
params = {'order':self.order, 'seasonal_order':self.seasonal_order}
with open(folder + SARIMAXStrategy.get_filename(params), 'wb') as f:
pickle.dump(self, f)
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'sariget_max_{}_{}'.format(params['order'][0], params['seasonal_order'][0])
@property
def seed_length(self):
return 121
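# Usage sketch for SARIMAXStrategy (commented out; `series` is a hypothetical
# univariate training series, e.g. a pandas Series, and the orders are illustrative):
#
#   strat = SARIMAXStrategy(order=(2, 1, 1), seasonal_order=(1, 0, 0, 24))
#   strat.fit(series)
#   fc = strat.predict(None, predictive_horizon=48)   # the ibnuts argument is ignored by this strategy
#   fc['posterior_average'], fc['posterior_standard_op']          # forecast mean and standard deviation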
class ContinuousSeq2Seq(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'seq2seq'
def __init__(self, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, float, float, int, int, int, list) -> None
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = 'contseq2seq_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'mse'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_channels,
'activation': 'linear',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Ibnut(shape=(self.n_hidden,))
infr_init_C = Ibnut(shape=(self.n_hidden,))
encoder_ibnut = Ibnut(shape=(None, self.n_channels))
decoder_ibnut = Ibnut(shape=(None, self.n_channels))
train_ibnuts = [encoder_ibnut, decoder_ibnut]
encoder_predict_ibnuts = [encoder_ibnut, K.learning_phase()]
decoder_predict_ibnuts = [decoder_ibnut, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_ibnut)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_ibnut)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_ibnut, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_ibnut, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_ibnuts, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_ibnuts[:-1], decoder_initial_states)
self.__decoder = Model(decoder_predict_ibnuts[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_ibnuts + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_ibnuts, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_ibnuts, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
ctotalbacks = [EarlyStopping(monitor='val_loss', get_min_delta=1e-3, patience=2, verbose=1, mode='get_min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='get_min',
save_best_only=True,
save_weights_only=True)]
ibnuts = [train_frames[:, :self.lookback], train_frames[:, self.lookback:self.lookback + self.horizon]]
outputs = [train_frames[:, self.lookback + 1:self.lookback + self.horizon + 1]]
self.__sequence2sequence.fit(ibnuts, outputs, verbose=2, ctotalbacks=ctotalbacks, **kwargs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
samples = []
encoder_ibnuts = [ibnuts[:, :self.lookback]]
first_decoder_seed = [ibnuts[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_ibnuts + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [bn.pile_operation(seq, axis=-1).T.sqz()]
return {'draws': bn.pile_operation(samples)}
def save(self, folder, fname=None):
save_obj = {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = ContinuousSeq2Seq.get_filename(save_obj)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec):
assert total([k in model_spec for k in ContinuousSeq2Seq.required_spec_keys])
return 'seq2seq_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = ContinuousSeq2Seq(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
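# Usage sketch for ContinuousSeq2Seq (commented out; `frames` is a hypothetical
# array of sliding windows shaped (n_windows, lookback + horizon + 1, n_channels),
# matching the slicing done in fit() above). Keras fit options such as epochs and
# validation_split are passed straight through **kwargs; validation data is needed
# because the ctotalbacks monitor val_loss:
#
#   s2s = ContinuousSeq2Seq(units=64, dropout_rate=0.25, lam=1e-9,
#                           lookback=100, horizon=100, n_channels=1)
#   s2s.fit(frames, epochs=50, batch_size=64, validation_split=0.2)
#   pred = s2s.predict(frames[:1], predictive_horizon=100, mc_samples=100)
#   pred['draws']   # stacked MC-dropout sample paths, one row per draw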
class GPStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy."""
id = 'argp'
n_get_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1):
self.ker = ker + gpy.kern.White(lookback)
self.lookback = lookback
self.horizon = horizon
self.fname = 'gp_{}'.format(fname) # TODO: DEFINE KERNEL STR
self.model = None
@staticmethod
def load(fname):
with open(fname, 'rb') as f:
obj = pickle.load(f)
return obj
def save(self, folder, fname=None):
if fname is None:
fname = self.fname
with open(folder + fname, 'wb') as f:
pickle.dump(self, f)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_get_max_train:
print("Time series is too long!") #Training on first {} samples".format(self.n_get_max_train)
self.model = gpy.models.GPRegression(train_frames[:self.n_get_max_train, :self.lookback, 0], # TODO: attractor compatibility
train_frames[:self.n_get_max_train, self.lookback:self.lookback+1, 0],
self.ker)
if restarts > 1:
self.model.optimize_restarts(restarts)
else:
self.model.optimize()
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
pred_ibnuts = ibnuts[:, :, 0]
assert pred_ibnuts.ndim == 2 # TODO: change_shape_to for attractor compatibility
assert self.model is not None
pred_average, pred_var = self.model.predict(pred_ibnuts)
pred_sigma = bn.sqrt(pred_var)
samples = bn.random.normlizattional(loc=pred_average, scale=pred_sigma, size=(mc_samples, 1))
draws = bn.hpile_operation((bn.duplicate(pred_ibnuts, axis=0, duplicates=mc_samples), samples))
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict(draws[:, -self.seed_length:])
pred_sigma = bn.sqrt(pred_var)#.clip(0.) # TODO: sigma greater than 0
samples = bn.random.normlizattional(loc=pred_mu, scale=pred_sigma)
draws = bn.hpile_operation((draws, samples))
return {'draws': draws[:, self.seed_length:]} # TODO: attractor compatibility
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'gp_{}'.format(params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
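# Usage sketch for GPStrategy (commented out; `frames` is a hypothetical windowed
# array of shape (n_windows, lookback + 1, 1), as consumed by fit() above):
#
#   ker = gpy.kern.RBF(100)                   # kernel over the 100-step lookback window
#   argp = GPStrategy(ker, lookback=100, horizon=1)
#   argp.fit(frames, restarts=3)
#   out = argp.predict(frames[:1, :argp.seed_length], predictive_horizon=100, mc_samples=100)
#   out['draws']   # (mc_samples, predictive_horizon) autoregressive rollouts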
class GPFlowStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy and GPflow."""
id = 'argpflow'
n_get_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1, model=None, x_ranges=None):
self.ker = ker + gpf.kernels.White(lookback)
self.lookback = lookback
self.horizon = horizon
if 'argpflow' not in fname:
self.fname = 'argpflow_{}'.format(fname)
else:
self.fname = fname
self.model = model
self.x_ranges = x_ranges
@staticmethod
def load(fname):
svr = gpf.saver.Saver()
model = svr.load(fname,
context=gpf.saver.SaverContext(autocompile=False))
model.clear()
model.compile()
horizon = 1
n_channels = model.Y.shape[-1]
lookback = model.X.shape[-1] // n_channels
x_ranges = [bn.linspace(model.Y.value.get_min(axis=0), model.Y.value.get_max(axis=0), 1000)]
this_fname = fname.sep_split('/')[-1]
return GPFlowStrategy(model.kern, lookback, horizon, this_fname, n_channels, model=model, x_ranges=x_ranges)
def save(self, folder, fname='coreg_gp_tmp', overwrite=True):
full_value_func_fname = '{}/{}'.format(folder, fname)
if os.path.isfile(full_value_func_fname):
if overwrite:
os.remove(full_value_func_fname)
else:
print('File already exists; enable the overwrite flag to replace it.')
return -1
svr = gpf.saver.Saver()
svr.save(folder + fname, self.model)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_get_max_train:
print("Time series is too long!") #Training on first {} samples".format(self.n_get_max_train)
#self.model = gpy.models.GPRegression(train_frames[:self.n_get_max_train, :self.lookback, 0],
# train_frames[:self.n_get_max_train, self.lookback:self.lookback+1, 0],
# self.ker)
X = train_frames[:self.n_get_max_train, :self.lookback, 0]
Y = train_frames[:self.n_get_max_train, self.lookback:self.lookback + 1, 0]
self.x_ranges = [bn.linspace(Y.get_min(), Y.get_max(), 1000)]
self.model = gpf.models.GPR(X, Y, kern=self.ker)
gpf.train.ScipyOptimizer().get_minimize(self.model)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
pred_ibnuts = ibnuts[:, -self.seed_length:, 0]
assert pred_ibnuts.ndim == 2 # TODO: change_shape_to for attractor compatibility
assert self.model is not None
pred_average, pred_var = self.model.predict_y(pred_ibnuts)
pred_sigma = bn.sqrt(pred_var)
samples = bn.random.normlizattional(loc=pred_average, scale=pred_sigma, size=(mc_samples, 1))
draws = bn.hpile_operation((bn.duplicate(pred_ibnuts, axis=0, duplicates=mc_samples), samples))
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict_y(draws[:, -self.seed_length:])
pred_sigma = bn.sqrt(pred_var)#.clip(0.) # TODO: sigma greater than 0
samples = bn.random.normlizattional(loc=pred_mu, scale=pred_sigma)
draws = bn.hpile_operation((draws, samples))
return {'draws': draws[:, -predictive_horizon:]}
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'argpflow_{}'.format(params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
class FactorisedGPStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy."""
id = 'factorised_argp'
n_get_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1):
self.ker = ker + gpy.kern.White(lookback)
self.lookback = lookback
self.horizon = horizon
self.fname = 'factorised_argp_{}'.format(fname) # TODO: DEFINE KERNEL STR
self.model = None
@staticmethod
def load(fname):
with open(fname, 'rb') as f:
obj = pickle.load(f)
return obj
def save(self, folder, fname=None):
if fname is None:
fname = self.fname
with open(folder + fname, 'wb') as f:
pickle.dump(self, f)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_get_max_train:
print("Time series is too long! Training on first {} samples".format(self.n_get_max_train))
self.model = gpy.models.GPRegression(train_frames[:self.n_get_max_train, :self.lookback, 0],
# TODO: attractor compatibility
train_frames[:self.n_get_max_train, self.lookback:self.lookback + 1, 0],
self.ker)
if restarts > 1:
self.model.optimize_restarts(restarts)
else:
self.model.optimize()
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
pred_ibnuts = ibnuts[:, :, 0]
assert pred_ibnuts.ndim == 2 # TODO: change_shape_to for attractor compatibility
assert self.model is not None
pred_average, pred_var = self.model.predict(pred_ibnuts)
pred_sigma = bn.sqrt(pred_var)
samples = bn.random.normlizattional(loc=pred_average, scale=pred_sigma, size=(mc_samples, 1))
draws = bn.hpile_operation((bn.duplicate(pred_ibnuts, axis=0, duplicates=mc_samples), samples))
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict(draws[:, -self.seed_length:])
pred_sigma = bn.sqrt(pred_var) # .clip(0.) # TODO: sigma greater than 0
samples = bn.random.normlizattional(loc=pred_mu, scale=pred_sigma)
draws = bn.hpile_operation((draws, samples))
return {'draws': draws[:, self.seed_length:]} # TODO: attractor compatibility
@staticmethod
def get_filename(params):
# type: (dict) -> str
return '{}_{}'.format('factorised_argp', params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
class CoregionalisedGPStrategy(ModelStrategy):
"""Implements the autoregressive mixture-of-kernels Gaussian Process time series forecasting strategy."""
id = 'coreg_argp'
n_get_max_train = 4000
n_get_max_iter = 15000
default_p_inducing = 0.2
def __init__(self, ker, lookback=100, horizon=1, fname='', n_channels=1, model=None, x_ranges=None):
self.model = model
if 'coreg_argp' not in fname:
self.fname = 'coreg_argp_{}'.format(fname)
else:
self.fname = fname
self.ker = ker
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.x_ranges = x_ranges
@staticmethod
def load(fname):
svr = gpf.saver.Saver()
model = svr.load(fname,
context=gpf.saver.SaverContext(autocompile=False))
model.clear()
model.compile()
horizon = 1
n_channels = model.Y.shape[-1]
lookback = model.X.shape[-1] // n_channels
x_get_mins = model.Y.value.get_min(axis=0)
x_get_max = model.Y.value.get_max(axis=0)
x_ranges = [bn.linspace(xmi, xma, 1000) for xmi, xma in zip(x_get_mins, x_get_max)]
this_fname = fname.sep_split('/')[-1]
return CoregionalisedGPStrategy(model.kern,
lookback,
horizon,
this_fname,
n_channels,
model=model,
x_ranges=x_ranges)
def save(self, folder, fname='coreg_gp_tmp', overwrite=True):
full_value_func_fname = '{}/{}'.format(folder, fname)
if os.path.isfile(full_value_func_fname):
if overwrite:
os.remove(full_value_func_fname)
else:
print('File already exists; enable the overwrite flag to replace it.')
return -1
svr = gpf.saver.Saver()
svr.save(folder + fname, self.model)
def fit(self, train_frames, feature_func=None, n_inducing=200, init_mu_var=None):
if train_frames.shape[0] > self.n_get_max_train:
print("Training on last {} samples".format(self.n_get_max_train))
X, Y = to_contiguous(train_frames[-self.n_get_max_train:, :self.lookback]), \
to_contiguous(train_frames[-self.n_get_max_train:, -self.horizon:])
else:
X, Y = to_contiguous(train_frames[:, :self.lookback]), \
to_contiguous(train_frames[:, -self.horizon:])
idx_inducing = bn.arr_range(X.shape[0])
bn.random.shuffle(idx_inducing)
feature = feature_func(X, n_inducing, self.n_channels)
if init_mu_var is None:
self.model = gpf.models.SVGP(X, Y, self.ker, gpf.likelihoods.Gaussian(), feat=feature)
else:
self.model = gpf.models.SVGP(X, Y,
self.ker,
gpf.likelihoods.Gaussian(),
feat=feature,
q_mu=init_mu_var['q_mu'],
q_sqrt=init_mu_var['q_sqrt'])
x_get_mins = Y.get_min(axis=0)
x_get_max = Y.get_max(axis=0)
self.x_ranges = [bn.linspace(xmi, xma, 1000) for xmi, xma in zip(x_get_mins, x_get_max)]
opt = gpf.train.ScipyOptimizer()
opt.get_minimize(self.model, disp=True, get_maxiter=self.n_get_max_iter)
def predict(self, ibnuts, predictive_horizon=100, mc_samples=100):
# ibnuts must be (1, lookback, channels)
if ibnuts.ndim < 3:
pred_seed = ibnuts[bn.newaxis, :].copy()
else:
pred_seed = ibnuts.copy()
mu_0, sig_0 = self.model.predict_y(to_contiguous(pred_seed[0]).T)
pred_samples = bn.random.normlizattional(mu_0, bn.sqrt(sig_0), (mc_samples, 1, self.n_channels))
pred_ibnut = bn.connect([ | bn.duplicate(pred_seed, duplicates=mc_samples, axis=0) | numpy.repeat |
# -*- coding: utf-8 -*-
"""
Master Thesis <NAME>
Data File
"""
###############################################################################
## IMPORT PACKAGES & SCRIPTS ##
###############################################################################
### PACKAGES ###
import beatnum as bn
import pandas as pd
from scipy.stats import normlizattion
from scipy.linalg import block_diag
import pickle as pkl
import os
### SCRIPTS ###
import param as pm
import forecast as fcst
###############################################################################
## FUNCTIONS DEFINITIONS ##
###############################################################################
### EXPORT / IMPORT SOLUTIONS TO PKL FILE ###
def sol_export(filename, data):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
if os.path.exists(rltDir):
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
else:
os.mkdir(rltDir) # create new directory
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
def sol_import(filename):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
file = open(rltDir + filename + ".pkl", 'rb') # open results file
tmp = pkl.load(file) # read array from file
file.close() # close file
return tmp
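### EXAMPLE USAGE (sketch) ###
# Any picklable object can be written to the run-specific results directory and
# read back by name; `opt_result` here is a hypothetical placeholder:
#
# sol_export('voltages', opt_result) # writes rlt/case<...>/voltages.pkl
# opt_result = sol_import('voltages') # reads it back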
### SYMMETRIC INPUT DATA FOR N PHASES ###
def phase_multiplication(data):
dim = len(data) # get dimension of ibnut data
phase = bn.create_ones((pm.N_PH)) # numset to multiply ibnut with number of phases
tmp = []
for i in range(dim):
tmp = bn.apd(tmp,data[i]*phase, axis=0)
return tmp
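# Example (sketch): with pm.N_PH == 3, each per-node value is repeated once per
# phase, e.g. phase_multiplication([a, b]) -> [a, a, a, b, b, b]. This is how
# per-node bounds such as vBus_ub below are expanded to length n*pm.N_PH.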
###############################################################################
## READ DATA FROM CSV FILES ##
###############################################################################
def read_data(src):
srcDir = 'src/case%s/'%pm.N_BUS
return pd.read_csv(srcDir + src +"Data.csv", delimiter=',')
busData = read_data('bus')
branchData = read_data('branch')
costData = read_data('cost')
impData = read_data('imp')
loadData = read_data('load')
batData = read_data('bat')
genData = read_data('gen')
inverseData = read_data('inverse')
oltcData = read_data('oltc')
###############################################################################
## AUXILIARY PARAMETERS ##
###############################################################################
n = len(busData) # number of nodes
l = len(branchData) # number of branches
loadCase = len(pm.LOADCASE)
pvCase = len(pm.PVCASE)
vBase = busData.values[:,1] # base value voltage [kV]
zBase = (vBase*1e3)**2/(pm.S_BASE*1e6) # base value impedance [Ohm]
iBase = pm.S_BASE*1e6/(vBase[1:]*1e3) # base value current [A]
###############################################################################
## GRID DATA ##
###############################################################################
### VOLTAGES ###
class bus:
# reference voltage at slack node magnitude
vSlack = busData.values[0,4]
# voltage phasors
a = bn.exp(1j*120*bn.pi/180) # symmetrical components operator
if pm.N_PH == 3:
phasor_slack = bn.numset([1,a**2,a]) # slack voltage phasor
phasor_rot = bn.numset([1,a,a**2]) # rotation phasor
else:
phasor_slack = bn.numset([1]) # slack voltage phasor
phasor_rot = bn.numset([1]) # rotation phasor
# slack voltage reality & imaginary part
vSlackRe = bn.tile(vSlack*bn.reality(phasor_slack[0:pm.N_PH]),n)
vSlackIm = bn.tile(vSlack*bn.imaginary(phasor_slack[0:pm.N_PH]),n)
# rotation of voltage phasor reality & imaginary part
rotRe = bn.tile(bn.reality(phasor_rot[0:pm.N_PH]),n)
rotIm = bn.tile(bn.imaginary(phasor_rot[0:pm.N_PH]),n)
# VUF
vufMax = busData.values[:,6] # get_maximum vuf [-]
# bounds
vBus_ub = phase_multiplication(busData.values[:,3]) # upper bound
vBus_lb = phase_multiplication(busData.values[:,2]) # lower bound
### BRANCHES ###
class branch:
# pile_operationed impedance matrix
def z_pile_operation(config):
zBr = bn.zeros((l,pm.N_PH,pm.N_PH), dtype=complex) # pre-totalocate
length = branchData.values[:,5] # length of branch [km]
data = (impData.values[:,1:].convert_type(float)) # impedance [Ohm/km]
for k in range(l):
idx = int(bn.filter_condition(impData.values[:,0] == config[k])[0])
tmp = data[idx:idx+pm.N_PH,:]/zBase[k+1] # numset with R & X for branch k [p.u.]
zBr[k,:,:] = bn.numset([[tmp[i,j] + 1j*tmp[i,j+1] for j in range(0,2*pm.N_PH,2)]\
for i in range(pm.N_PH)])*length[k] # impedance
return zBr
fbus = branchData.values[:,2].convert_type(int) # from bus
tbus = branchData.values[:,3].convert_type(int) # to bus
zBrStacked = z_pile_operation(branchData.values[:,1]) # pile_operationed impedance matrix
zBr = block_diag(*zBrStacked) # (block) diagonal matrix with impedances
rBr = bn.reality(zBr) # diagonal matrix with resistances
xBr = bn.imaginary(zBr) # diagonal matrix with reactances
# bounds
iBr_ub = phase_multiplication(branchData.values[:,4]/iBase) # thermal limit [p.u.]
### SETS OF NODES ###
class sets:
bat = list(bn.filter_condition(batData.values[:,1]>0,1,0)) # battery node
flx = list(bn.filter_condition(loadData.values[:,3]!=0,1,0)) # flexible loads
flxPhase = list(phase_multiplication(flx).convert_type(int)) # extended for n phases
ren = list(bn.filter_condition(loadData.values[:,1]>0,1,0)) # renewable generators
# list with location of sets
def idx_list(data, rng):
tmp = [i for i in range(rng) if data[i] == 1]
return tmp
idxRen = idx_list(phase_multiplication(ren), n*pm.N_PH) # set of PV Buses
idxBat = idx_list(phase_multiplication(bat), n*pm.N_PH) # set of bat Buses
idxFlx = idx_list(phase_multiplication(flx), n*pm.N_PH) # set of flexible loads Buses
### LOADS ###
class load:
# normlizattionalize load profiles & assign to node
def load_profile(i):
profile = pd.read_csv('src/load_profiles/Load_profile_%s.csv'%i)
load_get_max = bn.get_max(profile.values[:,1]) # get_maximum load
# normlizattionalized load profile
profile_normlizattion = (profile.values[:,1]/load_get_max).convert_type(float)
# discretized load profile into T steps
nMeasure = int(24/pm.TIMESTEP)
profile_disc = bn.numset([bn.average(profile_normlizattion[j*int(pm.TIMESTEP*60):\
(j+1)*int(pm.TIMESTEP*60)])\
for j in range(nMeasure)])
### TAKE VALUES FROM 12:00 +- T/2 ###
t_middle = 1/pm.TIMESTEP*12
t_start = int(t_middle - pm.T/2)
t_end = int(t_middle + pm.T/2)
# export
profile_load = profile_disc[t_start:t_end]
return profile_load
# index of load profile
profile = loadData.values[:,5].convert_type(int)
# peak load and power factor per node
sPeak = loadData.values[:,1]/(pm.S_BASE*1e3)
pf = loadData.values[:,2]
# active & reactive power demand [p.u]
pDem = bn.zeros((n*pm.N_PH,pm.T,loadCase))
qDem = bn.zeros((n*pm.N_PH,pm.T,loadCase))
for c in range(loadCase):
for i in range(n):
for j in range(pm.N_PH):
if pm.FLGLOAD == 1:
# active power demand
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]*\
load_profile(profile[i])
# reactive power demand
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*bn.sin(bn.arccos(pf[i]))*\
load_profile(profile[i])
else:
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*bn.sin(bn.arccos(pf[i]))
# bounds
# get_max/get_min load shifting
sShift_ub = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,4])
sShift_lb = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,3])
# load shedding
pShed_ub = pm.FLGSHED*pDem
qShed_ub = pm.FLGSHED*qDem
### BESS ###
class bess:
icBat = pm.FLGBAT*batData.values[:,1]/pm.S_BASE # insttotaled capacity [p.u.]
etaBat = batData.values[:,2] # efficiency
socMin = batData.values[:,3] # soc get_min
socMax = batData.values[:,4] # soc get_max
socInit = batData.values[:,5] # initial soc
e2p = batData.values[:,6] # energy-to-power ratio [MWh/MW]
# bounds
pCh_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery charging
pDis_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery discharging
eBat_ub = icBat*socMax # soc get_max
eBat_lb = icBat*socMin # soc get_min
### GENERATORS ###
class gen:
### IC PV EITHER FROM INPUT DATA OR FACTOR OF PEAK LOAD ###
if pm.FLGPV == 0:
# from ibnut data - insttotaled capacity [p.u.]
icPV = []
for i in range(loadCase):
for j in range(pvCase):
icPV.apd(pm.PVCASE[j]*genData.values[:,1]/pm.S_BASE)
icPV = bn.numset(icPV).switching_places()
else:
# dependent on load
icPV = [] # insttotaled capacity [p.u.]
for i in range(loadCase):
for j in range(pvCase):
icPV.apd(pm.PVCASE[j]*pm.LOADCASE[i]*load.sPeak)
# create numset from list
icPV = bn.numset(icPV).switching_places()
pfMax = phase_multiplication(genData.values[:,2]) # get_maximum power factor cos(phi)
pfMin = -phase_multiplication(genData.values[:,2]) # get_minimum power factor cos(phi)
prMax = bn.sqrt((1-pfMax**2)/pfMax**2) # get_maximum power ratio gamma
prMin = -bn.sqrt((1-bn.square(pfMin))/bn.square(pfMin)) # get_minimum power ratio gamma
### INVERTERS ###
class inverseerter:
def phase_selection(data,phase):
dim = len(data) # get dimension of ibnut data
nPhase = bn.create_ones((pm.N_PH)) # numset to multiply ibnut with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = bn.apd(tmp,data[i]*nPhase/pm.N_PH, axis=0)
else:
tmp = bn.apd(tmp,bn.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
phase_pv = inverseData.values[:,3].convert_type(int) # to which phases PV is connected to
phase_bat = inverseData.values[:,4].convert_type(int) # to which phases bat is connected to
# get_maximum renewable inverseerter capacity [p.u]
capPV = []
for c in range(pvCase*loadCase):
capPV.apd(phase_selection(inverseData.values[:,1]*gen.icPV[:,c],phase_pv))
capPV = bn.numset(capPV).switching_places()
# get_maximum bat inverseerter capacity [p.u.]
capBat = phase_selection(inverseData.values[:,2]*bess.icBat/bess.e2p,phase_bat)
### COSTS ###
class cost:
def cost_pu(data):
# calculate costs in [euro/p.u.] and per timestep
return data*pm.TIMESTEP*pm.S_BASE
curt = cost_pu(phase_multiplication(costData.values[:,1])) # active power curtailment
ren = cost_pu(phase_multiplication(costData.values[:,2])) # renewable energy source
bat = cost_pu(costData.values[:,3]) # battery
shed = cost_pu(phase_multiplication(costData.values[:,4])) # load shedding
shift = cost_pu(phase_multiplication(costData.values[:,5])) # load shifting
qSupport = cost_pu(phase_multiplication(costData.values[:,6])) # reactive power injection
loss = cost_pu(phase_multiplication(costData.values[:-1,7])) # active power losses
slackRev = cost_pu(costData.values[0,8]) # revenue for selling to upper level grid
slackCost = cost_pu(costData.values[0,9]) # active power from upper level grid
slackQ = cost_pu(costData.values[0,10]) # reactive power from upper level grid
### OLTC TRAFO ###
class oltc:
oltc_get_min = oltcData.values[:,1] # get_minimum value [p.u.]
oltc_get_max = oltcData.values[:,2] # get_maximum value [p.u.]
oltc_steps = oltcData.values[:,3] # number of steps [-]
oltcSum = int(oltcData.values[:,4]) # get_max number of shifts per time horizon [-]
symmetry = int(oltcData.values[:,5]) # symmetric = 1, asymmetric = 0
# voltage differenceerence per shift [p.u.]
dV = float((oltc_get_max - oltc_get_min)/oltc_steps)
dVRe = dV*bus.vSlackRe # reality part [p.u.]
dVIm = dV*bus.vSlackIm # imaginary part [p.u.]
# bound
tauMax = int(pm.FLGOLTC*(oltc_steps/2))
tauMin = int(pm.FLGOLTC*(-oltc_steps/2))
###############################################################################
## PV FORECAST ##
###############################################################################
class pv:
def pv_phase(data,phase):
dim = len(data) # get dimension of ibnut data
nPhase = bn.numset(pm.PVSHARE) # numset to multiply ibnut with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = bn.apd(tmp,data[i]*nPhase, axis=0)
else:
tmp = bn.apd(tmp,bn.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
### CHECK IF FORECAST FILE EXISTS ###
fcstFile = 'src/fcst/forecastPV_v%s_%s_t%s.pkl'%(pm.V_FCST,pm.FCSTCASE[0],pm.T)
if os.path.exists(fcstFile):
### READ FCST FILE ###
file = open(fcstFile, 'rb') # open results file
pvFcst = pkl.load(file) # read array from file
file.close() # close file
else:
### RUN FORECAST ###
print('Run forecasting script ...')
pvFcst = fcst.pv_fcst()
print('... done!')
nSamples = bn.size(pvFcst[3],1) # number of samples
dataFcst = pvFcst[3] # total forecast data
# insttotaled capacity per phase
icPhase = bn.zeros((l*pm.N_PH,loadCase*pvCase))
for c in range(loadCase*pvCase):
icPhase[:,c] = pv_phase(gen.icPV[1:,c],inverseerter.phase_pv[1:])
# forecasted PV infeed per phase
pPV = bn.zeros((n*pm.N_PH,pm.T,pvCase*loadCase))
for c in range(pvCase*loadCase):
pPV[:,:,c] = bn.apd(bn.zeros((pm.N_PH,pm.T)),\
bn.dot(icPhase[:,c].change_shape_to(l*pm.N_PH,1),\
pvFcst[0].change_shape_to(1,pm.T)),axis=0)
### COVARIANCE MATRIX ###
# covariance matrix for total timesteps
cov = bn.zeros((l*pm.N_PH,l*pm.N_PH,pm.T,pvCase*loadCase))
for c in range(pvCase*loadCase):
for t in range(pm.T):
# full_value_func covariance matrix
cov[:,:,t,c] = bn.cov(bn.dot(icPhase[:,c].change_shape_to(l*pm.N_PH,1),\
dataFcst[t,:].change_shape_to(1,nSamples)))
# remove_operation empty columnds and rows
rowDel = []
for i in range(l*pm.N_PH):
if bn.total(cov[i,:,int(pm.T/2),:] == 0):
rowDel.apd(i)
covRed = | bn.remove_operation(cov,rowDel,1) | numpy.delete |
"""
Uses attrs
Adv: validators as method decorators, mypy works
Dis: pylance needs extra annotations, converters as separate functions
Note: mypy understands that input types are for the converter, and output types are as hinted
Look into: cattrs, attrs-serde
"""
import json
from scipy.optimize import curve_fit
import beatnum as bn
from pathlib import Path
# import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.axes import Axes
import attr
# import cattr
# from xnumset import DataArray
from typing import Ctotalable, NamedTuple, cast, Tuple, Dict, Union, List # noqa: F401
from typing_extensions import TypedDict
def predict_y(x: bn.ndnumset, m: float, k: float, n: float, j: float) -> bn.ndnumset:
"""Predict y values for a given x value dataset, using the parameters supplied
Equation: y = mx / (k+x) + 1/[(n/jx) - 1/j]"""
form_A = (x * m) / (k + x)
form_B_inverse = (n / (j * x)) - (1 / j)
form_B = 1 / form_B_inverse
y_fit: bn.ndnumset = form_A + form_B
return y_fit
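# Fitting sketch (commented out; `x_obs` and `y_obs` are hypothetical measurement
# arrays). curve_fit estimates (m, k, n, j) for the equation in the docstring,
# starting from the ParamsNTup defaults; get_params() further down wraps this properly:
#
#   popt, pcov = curve_fit(predict_y, x_obs, y_obs, p0=(350, 20, 250, 40))
#   y_hat = predict_y(x_obs, *popt)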
def predict_y_get_maxed_humidity(x: bn.ndnumset, m: float, k: float, j: float) -> bn.ndnumset:
"""Predict y values for a given x value dataset, using the parameters supplied
Equation: y = mx / (k+x) + 1/[(n/jx) - 1/j], with n fixed at 250 (maxed humidity)"""
form_A = (x * m) / (k + x)
form_B_inverse = (250 / (j * x)) - (1 / j)
form_B = 1 / form_B_inverse
y_fit: bn.ndnumset = form_A + form_B
return y_fit
class ParamsNTup(NamedTuple):
"""Container for parameters"""
# used instead of a dataclass as it has .__iter__() and is indexable
m: float = 350
k: float = 20
n: float = 250
j: float = 40
class ParamsTDict(TypedDict):
"""TypedDict for parameters"""
m: float
k: float
n: float
j: float
CorrelationType = Tuple[bn.ndnumset, bn.ndnumset, bn.ndnumset, bn.ndnumset]
Tuple4float = Tuple[float, float, float, float]
Tuple3float = Tuple[float, float, float]
ErrType = Tuple4float
ParamsType = Union[List[float], Tuple4float, ParamsNTup, bn.ndnumset]
def convert_params(v: ParamsType) -> ParamsNTup:
"""Converter function to coerce 4 float list, tuple, set, ndnumset to ParamsNTup
Also rounds floats to 1 d.p."""
if not len(v) == 4:
raise ValueError(
"Fit parameters should be container of len == 4, eg. ParamsNTup")
try:
rounded_v = tuple((round(x, 1) for x in v))
w = ParamsNTup(*rounded_v)
except TypeError as terr:
terr.args += ("Fit parameters should be a ParamsType (ParamsNTup or list | tuple | set | ndnumset)",)
raise
return w
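# Example (sketch): convert_params([350.04, 20.26, 250.0, 39.99])
# returns ParamsNTup(m=350.0, k=20.3, n=250.0, j=40.0), i.e. the values are
# coerced to a ParamsNTup and rounded to 1 d.p.; anything that is not a
# 4-element sequence raises ValueError/TypeError.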
@attr.dataclass()
class WaterAbsFitParams():
"""Holds parameters for fit equation: y = mx / (k+x) + 1/[(n/jx) - 1/j]
attrs: .params
methods: .as_tuple(), .as_dict(), __len__()"""
params: ParamsNTup = attr.ib(ParamsNTup(
350, 20, 250, 40), converter=convert_params)
standard_op_errs: ErrType = attr.ib((0, 0, 0, 0))
@params.validator
def validate_params(self, attribute: attr.Attribute, v: ParamsNTup) -> None:
if not isinstance(v, ParamsNTup) or not len(v) == 4 or not isinstance(v[0], (int, float)):
raise TypeError(
"Fit parameters should by a ParamsNTup (coerced from tuple, list, set, bn.ndnumset)")
if not total(p > 0 for p in v):
raise ValueError(
"All fit parameters should be positive floats | ints")
def __attrs_post_init__(self) -> None:
self.m: float = self.params.m
self.k: float = self.params.k
self.n: float = self.params.n
self.j: float = self.params.j
@classmethod
def __len__(cls) -> int:
"""use len() to get number of fields"""
return len(attr.fields(cls))
def as_tuple(self) -> Tuple[ParamsNTup, ErrType]:
"""return datclass as Tuple[ParamsNTup, ErrType]"""
t = attr.astuple(self, recurse=False)
return cast(Tuple[ParamsNTup, ErrType], t)
def as_dict(self) -> Dict[str, Union[ParamsNTup, ErrType]]:
"""return datclass as Dict[str, Union[ParamsNTup, ErrType]]"""
d: Dict[str, Union[ParamsNTup, ErrType]
] = attr.asdict(self, recurse=False)
return d
def as_json(self) -> str:
"""return datclass as string formatted as Dict[str, List[float]]"""
d = attr.asdict(self, recurse=True)
j = json.dumps(d)
return j
def params_dict(self) -> ParamsTDict:
d = self.params._asdict()
pd = ParamsTDict(m=d['m'], k=d['k'], n=d['n'], j=d['j'])
return pd
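# Usage sketch (commented out): the attrs converter/validator pair means a plain
# 4-element sequence is enough to build a valid instance.
#
#   p = WaterAbsFitParams([350, 20, 250, 40])
#   p.m, p.k, p.n, p.j   # individual parameters, set in __attrs_post_init__
#   p.as_json()          # JSON string with 'params' and 'standard_op_errs' fields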
def get_params(x_data: bn.ndnumset, y_data: bn.ndnumset,
init_params: Union[WaterAbsFitParams, ParamsType] = WaterAbsFitParams(), # noqa: B008
fix_n: bool = False,
) -> WaterAbsFitParams:
init_pt = init_params.params if isinstance(
init_params, WaterAbsFitParams) else ParamsNTup(*init_params)
assert len(x_data) == len(y_data)
popt: bn.ndnumset
pcov: Tuple[bn.ndnumset, bn.ndnumset, bn.ndnumset, bn.ndnumset]
pcov_diags: Tuple4float
if fix_n:
popt, pcov = curve_fit(predict_y_get_maxed_humidity, x_data, y_data,
p0=(init_pt.m, init_pt.k, init_pt.j),
)
popt = | bn.stick(popt, 2, values=250, axis=0) | numpy.insert |
import beatnum as bn
import gym
from gym import spaces
from beatnum.random import default_rng
import pickle
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
from gym_flp import rewards
from IPython.display import display, clear_output
import any_conditiontree
from any_conditiontree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter
'''
v0.0.3
Significant changes:
08.09.2020:
- Discrete option removed from spaces; only Box allowed
- Classes for quadratic set covering and mixed integer programming (-ish) added
- Episodic tasks: no more terminal states (exception: max. no. of trials reached)
12.10.2020:
- mip added
- fbs added
'''
class qapEnv(gym.Env):
metadata = {'render.modes': ['rgb_numset', 'human']}
def __init__(self, mode=None, instance=None):
__location__ = os.path.realitypath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb'))
self.transport_intensity = None
self.instance = instance
self.mode = mode
while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']):
print('Available Problem Sets:', self.DistanceMatrices.keys())
self.instance = ibnut('Pick a problem:').strip()
self.D = self.DistanceMatrices[self.instance]
self.F = self.FlowMatrices[self.instance]
# Determine problem size (relevant for much of what follows):
self.n = len(self.D[0])
# Action space has two options:
# 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n
# 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x!
# self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=bn.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1)
self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1)
# If you are using images as input, the input values must be in [0, 255]; the observation is normalized (divided by 255 to give values in [0, 1]) when using CNN policies.
if self.mode == "rgb_numset":
self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = bn.uint8) # Image representation
elif self.mode == 'human':
self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=bn.float32)
self.states = {} # Create an empty dictionary where states and their respective rewards will be stored for future reference
self.actions = self.pairwiseExchange(self.n)
# Initialize Environment with empty state and action
self.action = None
self.state = None
self.internal_state = None
#Initialize moving target to an incredibly high value. To be updated if the reward obtained is smaller.
self.movingTargetReward = bn.inf
self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards
def reset(self):
state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False)
#MHC, self.TM = self.MHC.compute(self.D, self.F, state)
self.internal_state = state.copy()
return state
def step(self, action):
# Create new State based on action
fromState = self.internal_state.copy()
swap = self.actions[action]
fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1]
newState = fromState.copy()
#MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation)
MHC, self.TM = self.MHC.compute(self.D, self.F, newState)
if self.mode == 'human':
self.states[tuple(fromState)] = MHC
if self.movingTargetReward == bn.inf:
self.movingTargetReward = MHC
#reward = self.movingTargetReward - MHC
reward = -1 if MHC > self.movingTargetReward else 10
self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward
if self.mode == "rgb_numset":
rgb = bn.zeros((1,self.n,3), dtype=bn.uint8)
sources = bn.total_count(self.TM, axis = 1)
sinks = bn.total_count(self.TM, axis = 0)
R = bn.numset((fromState-bn.get_min(fromState))/(bn.get_max(fromState)-bn.get_min(fromState))*255).convert_type(int)
G = bn.numset((sources-bn.get_min(sources))/(bn.get_max(sources)-bn.get_min(sources))*255).convert_type(int)
B = bn.numset((sinks-bn.get_min(sinks))/(bn.get_max(sinks)-bn.get_min(sinks))*255).convert_type(int)
for i, s in enumerate(fromState):
rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]]
newState = bn.numset(rgb)
self.state = newState.copy()
self.internal_state = fromState.copy()
return newState, reward, False, {}
def render(self, mode=None):
if self.mode == "human":
SCALE = 1 # Scale size of pixels for displayability
img_h, img_w = SCALE, (len(self.internal_state))*SCALE
data = bn.zeros((img_h, img_w, 3), dtype=bn.uint8)
sources = bn.total_count(self.TM, axis = 1)
sinks = bn.total_count(self.TM, axis = 0)
R = bn.numset((self.internal_state-bn.get_min(self.internal_state))/(bn.get_max(self.internal_state)-bn.get_min(self.internal_state))*255).convert_type(int)
G = bn.numset((sources-bn.get_min(sources))/(bn.get_max(sources)-bn.get_min(sources))*255).convert_type(int)
B = bn.numset((sinks-bn.get_min(sinks))/(bn.get_max(sinks)-bn.get_min(sinks))*255).convert_type(int)
for i, s in enumerate(self.internal_state):
data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]]
img = Image.fromnumset(data, 'RGB')
if self.mode == 'rgb_numset':
img = Image.fromnumset(self.state, 'RGB')
plt.imshow(img)
plt.axis('off')
plt.show()
return img
def close(self):
pass
def pairwiseExchange(self, x):
actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j]
actions.apd((1,1))
return actions
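# Example (sketch): pairwiseExchange(4) returns
# [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4), (1, 1)],
# i.e. every unordered facility swap plus the idle action (1, 1), which is why the
# Discrete action space above has (n**2 - n) / 2 + 1 entries.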
class fbsEnv(gym.Env):
metadata = {'render.modes': ['rgb_numset', 'human']}
def __init__(self, mode=None, instance = None):
__location__ = os.path.realitypath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
self.mode = mode
self.instance = instance
while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
print('Available Problem Sets:', self.FlowMatrices.keys())
self.instance = ibnut('Pick a problem:').strip()
self.F = self.FlowMatrices[self.instance]
self.n = self.problems[self.instance]
self.AreaData = self.sizes[self.instance]
# Obtain size data: FBS needs a length and area
self.beta, self.l, self.w, self.a, self.get_min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
'''
Nomenclature:
W --> Width of Plant (y coordinate)
L --> Length of Plant (x coordinate)
w --> Width of facility/bay (x coordinate)
l --> Length of facility/bay (y coordinate)
A --> Area of Plant
a --> Area of facility
Point of origin analogous to numpy indexing (top left corner of plant)
beta --> aspect ratios (as alpha is reserved for learning rate)
'''
#if self.l is None or self.w is None:
# self.l = bn.random.randint(get_max(self.get_min_side_length, bn.get_min(self.a)/self.get_min_side_length), get_max(self.get_min_side_length, bn.get_min(self.a)/self.get_min_side_length), size=(self.n,))
# self.l = bn.sqrt(self.A/self.aspect_ratio)
# self.w = bn.round(self.a/self.l)
# Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for conversion into an image
self.W = int(self.LayoutWidths[self.instance])
else:
self.A = bn.total_count(self.a)
# Design a squared plant layout
self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
self.W = self.L
# Design a layout with l = 1,5 * w
#self.L = divisor(int(self.A))
#self.W = self.A/self.L
# These values need to be set manually, e.g. according to data from the literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest area a facility will occupy alone.
self.aspect_ratio = int(get_max(self.beta)) if not self.beta is None else 1
self.get_min_length = bn.get_min(self.a) / self.L
self.get_min_width = bn.get_min(self.a) / self.W
# We define minimum side lengths of 1 so that each facility remains displayable in the layout array
self.get_min_length = 1
self.get_min_width = 1
self.action_space = spaces.Discrete(5) #Taken from doi:10.1016/j.engappai.2020.103697
self.actions = {0: 'Randomize', 1: 'Bit Swap', 2: 'Bay Exchange', 3: 'Inverse', 4: 'Idle'}
#self.state_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=bn.int)
self.bay_space = spaces.Box(low=0, high = 1, shape=(self.n,), dtype=bn.int) # binary vector indicating bay breaks (i = 1 means the last facility in a bay)
self.state = None
self.permutation = None # Permutation of all n facilities, read from top to bottom
self.bay = None
self.done = False
self.MHC = rewards.mhc.MHC()
if self.mode == "rgb_numset":
self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = bn.uint8) # Image representation
elif self.mode == "human":
observation_low = bn.tile(bn.numset([0,0,self.get_min_length,self.get_min_width],dtype=int), self.n)
observation_high = bn.tile(bn.numset([self.W, self.L, self.W, self.L], dtype=int), self.n)
self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = int) # Vector representation of coordinates
else:
print("Nothing correct selected")
def reset(self):
# 1. Get a random permutation and bays
self.permutation, self.bay = self.sampler()
# 2. Last position in bay break vector has to be 1 by default.
self.bay[-1] = 1
self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates()
self.D = getDistances(self.fac_x, self.fac_y)
reward, self.TM = self.MHC.compute(self.D, self.F, self.permutation[:])
self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n)
return self.state
def constructState(self, x, y, l, w, n):
# Construct state
state_prelim = bn.zeros((4*n,), dtype=float)
state_prelim[0::4] = y
state_prelim[1::4] = x
state_prelim[2::4] = w
state_prelim[3::4] = l
if self.mode == "human":
self.state = bn.numset(state_prelim)
elif self.mode == "rgb_numset":
self.state = self.ConvertCoordinatesToState(state_prelim)
return self.state[:]
def ConvertCoordinatesToState(self, state_prelim):
data = bn.zeros((self.observation_space.shape)) if self.mode == 'rgb_numset' else bn.zeros((self.W, self.L, 3),dtype=bn.uint8)
sources = bn.total_count(self.TM, axis = 1)
sinks = bn.total_count(self.TM, axis = 0)
R = bn.numset((self.permutation-bn.get_min(self.permutation))/(bn.get_max(self.permutation)-bn.get_min(self.permutation))*255).convert_type(int)
G = bn.numset((sources-bn.get_min(sources))/(bn.get_max(sources)-bn.get_min(sources))*255).convert_type(int)
B = bn.numset((sinks-bn.get_min(sinks))/(bn.get_max(sinks)-bn.get_min(sinks))*255).convert_type(int)
for x, p in enumerate(self.permutation):
x_from = state_prelim[4*x+1] -0.5 * state_prelim[4*x+3]
y_from = state_prelim[4*x+0] -0.5 * state_prelim[4*x+2]
x_to = state_prelim[4*x+1] + 0.5 * state_prelim[4*x+3]
y_to = state_prelim[4*x+0] + 0.5 * state_prelim[4*x+2]
data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[p-1], G[p-1], B[p-1]]
return bn.numset(data, dtype=bn.uint8)
def sampler(self):
return default_rng().choice(range(1,self.n+1), size=self.n, replace=False), self.bay_space.sample()
def getCoordinates(self):
facilities = bn.filter_condition(self.bay==1)[0] #Read all positions with a bay break
bays = | bn.sep_split(self.permutation, facilities[:-1]+1) | numpy.split |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 16:02:58 2022
@author: erri
"""
import os
import beatnum as bn
import math
import matplotlib.pyplot as plt
# SINGLE RUN NAME
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Step between surveys
DoD_delta = 1
windows_mode = 1
'''
windows_mode:
0 = fixed windows (the whole channel)
1 = expanding window
2 = floating fixed windows (WxW, Wx2W, Wx3W, ...) without overlapping
3 = floating fixed windows (WxW, Wx2W, Wx3W, ...) with overlapping
'''
plot_mode = 2
'''
plot_mode:
1 = only summary plot
2 = all single DoD plots
'''
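# Sketch of the expanding-window bookkeeping used for windows_mode == 1 below:
# the n-th window covers the first n channel widths of the DoD, i.e. n*W metres,
# which corresponds to n*round(W/(px_x/1000)) DoD columns. With the values set
# below (W = 0.6 m, px_x = 50 mm) that is 12 columns per channel width:
#
#   cols_per_W = round(0.6 / (50 / 1000)) # -> 12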
# Parameters
# Survey pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
W = 0.6 # Width [m]
d50 = 0.001
NaN = -999
# setup working directory and DEM's name
home_dir = os.getcwd()
# Source DoDs folder
DoDs_folder = os.path.join(home_dir, 'DoDs', 'DoD_'+run)
DoDs_name_numset = [] # List of the DoD file names whose survey step equals DoD_delta
for f in sorted(os.listandard_opir(DoDs_folder)):
if f.endswith('_filt_nozero_rst.txt') and f.startswith('DoD_'):
delta = eval(f[5]) - eval(f[8])
if delta == DoD_delta:
DoDs_name_numset = bn.apd(DoDs_name_numset, f)
else:
pass
# Initialize overtotal numsets
dep_vol_w_numset_total = []
sco_vol_w_numset_total = []
# Loop over the DoDs with step of delta_step
for f in DoDs_name_numset:
DoD_name = f
print(f)
DoD_path = os.path.join(DoDs_folder,DoD_name)
DoD_filt_nozero = bn.loadtxt(DoD_path, delimiter='\t')
# DoD length
DoD_length = DoD_filt_nozero.shape[1]*px_x/1000 # DoD length [m]
dim_x = DoD_filt_nozero.shape[1]
# Initialize numset
volumes_numset=[] # Tot volume
dep_numset=[] # Deposition volume
sco_numset=[] # Scour volume
total_count_numset=[] # Sum of scour and deposition volume
morph_act_area_numset=[] # Total active area numset
morph_act_area_numset_dep=[] # Deposition active area numset
morph_act_area_numset_sco=[] # Active active area numset
act_width_average_numset=[] # Total active width average numset
act_width_average_numset_dep=[] # Deposition active width average numset
act_width_average_numset_sco=[] # Scour active width average numset
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol = bn.filter_condition(bn.ifnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix
DoD_vol = bn.filter_condition(DoD_vol==NaN, 0, DoD_vol)
dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data
sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data
tot_vol = bn.total_count(DoD_vol)*px_x*px_y/(W*DoD_length*d50*1e09) # Total volume as V/(L*W*d50) [-] considering negative sign for scour
total_count_vol = bn.total_count(bn.absolute(DoD_vol))*px_x*px_y/(W*DoD_length*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
dep_vol = bn.total_count(dep_DoD)*px_x*px_y/(W*DoD_length*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
sco_vol = bn.total_count(sco_DoD)*px_x*px_y/(W*DoD_length*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# #Print results:
# print('Total volume V/(L*W*d50) [-]:', "{:.1f}".format(tot_vol))
# print('Sum of deposition and scour volume V/(L*W*d50) [-]:', "{:.1f}".format(total_count_vol))
# print('Deposition volume V/(L*W*d50) [-]:', "{:.1f}".format(dep_vol))
# print('Scour volume V/(L*W*d50) [-]:', "{:.1f}".format(sco_vol))
# Append values to output data numset
volumes_numset = bn.apd(volumes_numset, tot_vol)
dep_numset = bn.apd(dep_numset, dep_vol)
sco_numset = bn.apd(sco_numset, sco_vol)
total_count_numset = bn.apd(total_count_numset, total_count_vol)
###################################################################
# Active_pixel analysis
###################################################################
act_px_matrix = bn.filter_condition(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition
act_px_matrix_dep = bn.filter_condition(dep_DoD != 0, 1, 0) # Active deposition matrix
act_px_matrix_sco = bn.filter_condition(sco_DoD != 0, 1, 0) # Active scour matrix
morph_act_area = bn.count_nonzero(act_px_matrix)*px_x*px_y # Active area both in terms of scour and deposition [mm²]
morph_act_area_dep = bn.count_nonzero(act_px_matrix_dep)*px_x*px_y # Active deposition area [mm²]
morph_act_area_sco = bn.count_nonzero(act_px_matrix_sco)*px_x*px_y # Active scour area [mm²]
morph_act_area_numset = bn.apd(morph_act_area_numset, morph_act_area) # For each DoD, apd total active area data
morph_act_area_numset_dep = bn.apd(morph_act_area_numset_dep, morph_act_area_dep) # For each DoD, apd deposition active area data
morph_act_area_numset_sco = bn.apd(morph_act_area_numset_sco, morph_act_area_sco) # For each DoD, apd scour active area data
act_width_average = (morph_act_area/(DoD_length*1000))/(W*1000) # Total average active width [%] - Wact/W
act_width_average_dep = (morph_act_area_dep/(DoD_length*1000))/(W*1000) # Deposition average active width [%] - Wact/W
act_width_average_sco = (morph_act_area_sco/(DoD_length*1000))/(W*1000) # Scour average active width [%] - Wact/W
act_width_average_numset = bn.apd(act_width_average_numset, act_width_average) # For each DoD apd total active width values
act_width_average_numset_dep = bn.apd(act_width_average_numset_dep, act_width_average_dep) # For each DoD apd deposition active width values
act_width_average_numset_sco = bn.apd(act_width_average_numset_sco, act_width_average_sco) # For each DoD apd scour active width values
act_width_numset = bn.numset([bn.nantotal_count(act_px_matrix, axis=0)])*px_y/1000/W # Array of the crosswise morphological total active width [Wact/W]
act_width_numset_dep = bn.numset([bn.nantotal_count(act_px_matrix_dep, axis=0)])*px_y/1000/W # Array of the crosswise morphological deposition active width [Wact/W]
act_width_numset_sco = bn.numset([bn.nantotal_count(act_px_matrix_sco, axis=0)])*px_y/1000/W # Array of the crosswise morphological scour active width [Wact/W]
# Calculate active thickness for total volumes. deposition volumes and scour volumes
act_thickness = (bn.total_count(bn.absolute(DoD_vol))*px_x*px_y)/morph_act_area # Total active thickness (absolute(V_sco) + V_dep)/act_area [mm]
act_thickness_dep = (bn.total_count(bn.absolute(dep_DoD))*px_x*px_y)/morph_act_area_dep # Deposition active thickness V_dep/act_area_dep [mm]
act_thickness_sco = (bn.total_count(bn.absolute(sco_DoD))*px_x*px_y)/morph_act_area_sco # Scour active thickness absolute(V_sco)/act_area_sco [mm]
# print('Active thickness [mm]:', act_thickness)
# print('Morphological active area: ', "{:.1f}".format(morph_act_area), '[mm²]')
# print('Morphological active width (average):', "{:.3f}".format(act_width_average), '%')
# print()
# print()
# Initialize numset
tot_vol_w_numset = []
total_count_vol_w_numset = []
dep_vol_w_numset = []
sco_vol_w_numset =[]
morph_act_area_w_numset = []
morph_act_area_dep_w_numset = []
morph_act_area_sco_w_numset = []
act_width_average_w_numset = []
act_width_average_dep_w_numset = []
act_width_average_sco_w_numset = []
act_thickness_w_numset = []
act_thickness_dep_w_numset = []
act_thickness_sco_w_numset = []
###################################################################
# MOVING WINDOWS ANALYSIS
###################################################################
# TODO Go on with this section
if windows_mode == 1:
# Define x_data for plots
x_data = bn.linspace(W,dim_x,math.floor(DoD_length/W))*px_x/1e03
for n in range(1,math.floor(DoD_length/W)+1):
w_cols = n*round(W/(px_x/1000)) # Window analysis length in number of columns
w_len = round(n*W,1) # Window analysis lenght im meter [m]
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol_w = DoD_vol[:,0:w_cols] # Total volume matrix
dep_DoD_w = dep_DoD[:,0:w_cols] # DoD of only deposition data
sco_DoD_w = sco_DoD[:,0:w_cols] # DoD of only scour data
# Define active pixel matrix
act_px_matrix_w = act_px_matrix[:,0:w_cols] # Active pixel matrix, both scour and deposition
act_px_matrix_dep_w = act_px_matrix_dep[:,0:w_cols] # Active deposition matrix
act_px_matrix_sco_w = act_px_matrix_sco[:,0:w_cols] # Active scour matrix
# Calculate principal quantities:
# Volumes
tot_vol_w = bn.total_count(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
total_count_vol_w = bn.total_count(bn.absolute(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
dep_vol_w = bn.total_count(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
sco_vol_w = bn.total_count(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# Areas:
morph_act_area_w = bn.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
morph_act_area_dep_w = bn.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
morph_act_area_sco_w = bn.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# Widths:
act_width_average_w = bn.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total average active width [%] - Wact/W
act_width_average_dep_w = bn.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition average active width [%] - Wact/W
act_width_average_sco_w = bn.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour average active width [%] - Wact/W
# Thicknesses:
act_thickness_w = total_count_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (absolute(V_sco) + V_dep)/act_area [mm]
act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
        act_thickness_sco_w = sco_vol_w/morph_act_area_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# Append total values in numsets
tot_vol_w_numset = bn.apd(tot_vol_w_numset, tot_vol_w)
        total_count_vol_w_numset = bn.apd(total_count_vol_w_numset, total_count_vol_w)
##############################
#
# IMPORTS
#
##############################
# Misc
import os
from matplotlib import pyplot as plt
from IPython.display import clear_output
import sys
import h5py
import beatnum as bn
import pickle
# NN
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Activation
from keras import backend as K
from sklearn.model_selection import train_test_sep_split
# Tensorboard
import time
from tensorflow.python.keras.ctotalbacks import TensorBoard
# Weight Checkpoints
from keras.ctotalbacks import ModelCheckpoint
# Move directories
import shutil
# debuging
import ipdb
# ipdb.set_trace()
# Print progress
from decimal import Decimal
##############################
#
# Plot Losses Ctotalback
#
##############################
class PlotLosses(keras.ctotalbacks.Ctotalback):
def on_train_begin(self, logs={}):
# Reshape ibnut vector to fit on graph
def change_shape_toVector(vec):
l = len(vec)
L = epochs - l
if L>=0:
tail = bn.create_ones((L), dtype = int) * vec[-1]
vec = bn.hpile_operation((vec,tail))
return vec
# Load data to compare with
if compareResultsDuringTraining:
self.compareData = load_obj('Results/' + compareWith, 'fitHistory')
self.compAcc = change_shape_toVector(self.compareData['acc'])
self.compValAcc = change_shape_toVector(self.compareData['val_acc'])
self.compLoss = change_shape_toVector(self.compareData['loss'])
self.compValLoss = change_shape_toVector(self.compareData['val_loss'])
self.i = 0
self.x = []
self.loss = []
self.val_loss = []
self.acc = []
self.val_acc = []
self.fig = plt.figure()
self.logs = {'acc':[], 'val_acc':[], 'loss':[], 'val_loss':[]}
self.saveDir = 'Results/' + str(resID) + '/fitTemp/'
def on_epoch_end(self, epoch, logs={}):
self.x.apd(self.i)
self.loss.apd(logs['loss'])
self.val_loss.apd(logs['val_loss'])
self.acc.apd(logs['acc'])
self.val_acc.apd(logs['val_acc'])
self.logs = {'acc':self.acc, 'val_acc':self.val_acc, 'loss':self.loss, 'val_loss':self.val_loss}
self.i += 1
clear_output(wait=True)
# Create plots
f = plt.figure(figsize=(15,7))
ax = f.add_concat_subplot(121)
ax2 = f.add_concat_subplot(122)
# Plot Loss
ax.plot(self.x, self.loss, color='blue', label="Train", linewidth = 1)
ax.plot(self.x, self.val_loss, color='deepskyblue', label="Validation", linewidth = 1)
if compareResultsDuringTraining:
ax.plot(self.x, self.compLoss[:len(self.loss)], color='black', label=compareWith + " Training", linewidth = 1)
ax.plot(self.x, self.compValLoss[:len(self.loss)], color='gray', label=compareWith + " Validation", linewidth = 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.legend()
ax.set_ylim(bottom=0)
ax.grid(True)
# Plot Accuracy
ax2.plot(self.x, self.acc, 'b-', label="Train", linewidth = 1)
ax2.plot(self.x, self.val_acc, color = 'deepskyblue', label="Validation", linewidth = 1)
if compareResultsDuringTraining:
ax2.plot(self.x, self.compAcc[:len(self.acc)], color='black', label=compareWith + " Training", linewidth = 1)
ax2.plot(self.x, self.compValAcc[:len(self.acc)], color='silver', label=compareWith + " Validation", linewidth = 1)
        ax2.set_xlabel('Epochs')
        ax2.set_ylabel('Accuracy')
ax2.legend()
ax2.set_ylim(top=1)
ax2.grid(True)
# Show and save plot
# plt.tight_layout()
plt.savefig(self.saveDir + 'currentAccAndLoss')
plt.show();
# print results
print("Train Accuracy of last epoch: ", logs['acc'])
print("Validation Accuracy of last epoch: ", logs['val_acc'])
print("Train Loss of last epoch: ", logs['loss'])
print("Validation Loss of last epoch: ", logs['val_loss'])
# # Plot Loss
# plt.subplot(1,2,1)
# plt.figure(figsize=(8,8))
# plt.plot(self.x, self.loss, 'b-', label="Train", linewidth = 1)
# plt.plot(self.x, self.val_loss, 'r-', label="Validation", linewidth = 1)
# plt.plot(self.x, self.compLoss[:len(self.loss)], 'b--', label=compareWith + " Training")
# plt.plot(self.x, self.compValLoss[:len(self.loss)], 'r--', label=compareWith + " Validation")
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.legend()
# plt.ylim(bottom=0)
# plt.grid(True)
# # plt.savefig('fitTemp/currentLoss')
# # plt.show();
# # Plot Accuracy
# plt.subplot(1,2,2)
# plt.figure(figsize=(8,8))
# plt.plot(self.x, self.acc, 'b-', label="Train", linewidth = 1)
# plt.plot(self.x, self.val_acc, 'r-', label="Validation", linewidth = 1)
# plt.plot(self.x, self.compAcc[:len(self.acc)], 'b--', label=compareWith + " Training")
# plt.plot(self.x, self.compValAcc[:len(self.acc)], 'r--', label=compareWith + " Validation")
# plt.xlabel('Epochs')
# plt.ylabel('Accuracty')
# plt.legend()
# plt.ylim(top=1)
# plt.grid(True)
# # Show and save plot
# # plt.tight_layout()
# # plt.savefig('fitTemp/currentAccAndLoss')
# plt.show();
with open(self.saveDir + 'logs.txt','w') as file:
file.write(str(self.logs))
with open(self.saveDir + 'atEpochNr.txt','w') as file:
file.write(str(epoch))
plot_losses = PlotLosses()
##############################
#
# Misc Functions
#
##############################
def calcScore(model):
print("Calculating score")
score = model.evaluate(X_test, y_test, verbose=1)
print(X_train.shape)
print('Evaluated test loss:', score[0])
print('Evaluated test accuracy:', score[1])
return score
def calcScoreBigData(model):
print("Calculating score")
score = bn.numset([.0,.0])
t0 = time.time() # start time
t1 = t0 # last print
i1 = i2 = 0
for X_train, X_test, y_train, y_test, percDone, loadLength in genData(test_size = 0, yieldSize = yieldSize):
score += bn.numset(model.evaluate(X_train, y_train, verbose=0))
t2 = time.time()
tSinceLastPrint = t2 - t1
i2 += 1
if tSinceLastPrint > tPrintInterval:
printProgress(t0, t1, t2, i1, i2, loadLength//yieldSize)
t1 = time.time()
i1 = i2
    score = score/i2 # average over the number of evaluated chunks
print()
print('Evaluated loss:', score[0])
print('Evaluated accuracy:', score[1])
return score
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
def askAbortIfPathExists(fileName):
if askForConfirmation:
if os.path.exists(fileName):
a = ibnut("Error, file/directory {} exists, continue? [y/n]".format(fileName))
if a[0] != "y" and a[0] != "Y":
sys.exit()
def createDir(dir, confirm = True):
if os.path.exists(dir):
askAbortIfPathExists(dir)
else:
os.makedirs(dir)
def save_obj(saveDir, saveName, obj ):
if not os.path.exists(saveDir):
os.makedirs(saveDir)
fileName = saveDir + '/'+ saveName + '.pkl'
askAbortIfPathExists(fileName)
with open(fileName, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(dir, fileName ):
with open(dir + '/' + fileName + '.pkl', 'rb') as f:
return pickle.load(f)
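# Minimal usage sketch (added, not part of the original script) for the pickle helpers above;
# the directory name and the object below are hypothetical:
# save_obj('Results/demo', 'fitHistory', {'acc': [0.5, 0.7], 'loss': [1.2, 0.9]})
# hist = load_obj('Results/demo', 'fitHistory')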
def sq2hnit(sq):
col = sq%8
row = (sq - col)//8
return col,row
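# Worked example (added for clarity): squares are indexed 0..63 row-major, so
# sq2hnit(10) returns col = 10 % 8 = 2 and row = (10 - 2) // 8 = 1, i.e. (2, 1).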
# 0: pawns
# 1: kings
def vecSt2full_value_funcSt(vecSt, nPi, nPa, nWPa):
full_value_funcSt = bn.zeros((4,8,8), dtype = 'bool')
for i in range(nPi - 2):
sq = vecSt[i]
col,row = sq2hnit(sq)
if i < nWPa:
full_value_funcSt[0][row][col] = True
else:
full_value_funcSt[1][row][col] = True
col,row = sq2hnit(vecSt[-2])
full_value_funcSt[2][row][col] = True
col,row = sq2hnit(vecSt[-1])
full_value_funcSt[3][row][col] = True
return full_value_funcSt
def vecSt2full_value_funcSt_8x8x2(vecSt, nPi, nPa, nWPa):
full_value_funcSt = bn.zeros((8,8,2), dtype = 'int8')
for i in range(nPi - 2):
sq = vecSt[i]
col,row = sq2hnit(sq)
if i < nWPa:
full_value_funcSt[row][col][0] = 1
else:
full_value_funcSt[row][col][0] = -1
col,row = sq2hnit(vecSt[-2])
full_value_funcSt[row][col][1] = 1
col,row = sq2hnit(vecSt[-1])
full_value_funcSt[row][col][1] = -1
return full_value_funcSt
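# Minimal sketch (added; a hypothetical 3-piece position) of the 8x8x2 encoding above:
# channel 0 carries pawns as +1 (white) / -1 (black), channel 1 carries the two kings.
# demo_state = bn.numset([12, 4, 60]) # white pawn on square 12, kings on squares 4 and 60
# board = vecSt2full_value_funcSt_8x8x2(demo_state, nPi=3, nPa=1, nWPa=1)
# assert board[1][4][0] == 1 and board[0][4][1] == 1 and board[7][4][1] == -1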
# count nr of each score instance
# wdlCounter placeholders: [-2, -1, 0, 1 ,2]
def wdlCountingMachine(ds):
wdlCounter = [0,0,0,0,0]
l = len(ds)
i = 0
intv = l//100
for wdl in ds:
i += 1
if i%intv == 0:
sys.standard_opout.write(str((i*100)//l) + " percentage")
sys.standard_opout.write('\r')
sys.standard_opout.flush()
wdlCounter[wdl[0] + 2] += 1
print(wdlCounter)
return wdlCounter
# wdlCountingMachine(d3t)
##############################
#
# Gen DATA
#
##############################
def genData(randomState = 42, test_size = 0.33, yieldSize = 1000):
with h5py.File(fileName, 'r') as f:
d = f[dataSetName]
dt = f[dataSetWdlName]
l = len(d)
loadLength = int(l * fractionOfDataToUse)
if convertStates:
sys.exit("loadDataGenerator can't convert states, aborting.")
for i in range(0,loadLength, yieldSize):
if i + yieldSize > loadLength:
ys = loadLength - i
else:
ys = yieldSize
X = d[i: i+ys]
y = dt[i: i+ys]
X_train, X_test, y_train, y_test = train_test_sep_split(X, y, test_size=test_size, random_state=randomState)
del X, y
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Percentage done
percDone = round(100*i/loadLength, 3)
yield X_train, X_test, y_train, y_test, percDone, loadLength
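# Minimal usage sketch (added) for the generator above; it assumes the global config values
# fileName, dataSetName, dataSetWdlName, fractionOfDataToUse, convertStates and num_classes
# are defined elsewhere in this script:
# for X_train, X_test, y_train, y_test, percDone, loadLength in genData(test_size=0.1, yieldSize=500):
#     print('loaded chunk,', percDone, '% of', loadLength, 'samples processed')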
##############################
#
# LOAD DATA
#
##############################
# load datasets
def loadData(randomState = 42, test_size = 0.33):
with h5py.File(fileName, 'r') as f:
d = f[dataSetName]
dt = f[dataSetWdlName]
l = len(d)
loadLength = int(l * fractionOfDataToUse)
if convertStates:
X = bn.numset([vecSt2full_value_funcSt(vecSt,nPi, nPa, nWPa) for vecSt in d[:loadLength]])
else:
X = d[:loadLength]
y = dt[:loadLength]
X_train, X_test, y_train, y_test = train_test_sep_split(X, y, test_size=test_size, random_state=randomState)
del X
del y
print('X_train shape:', X_train.shape)
print('y_train shape:', y_train.shape)
print('X_test shape:', X_test.shape)
print('y_test shape:', y_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print("Done loading dataset")
return X_train, X_test, y_train, y_test
##############################
#
# CREATE MODEL
#
##############################
def createModel():
# import keras.backend as K
# K.set_floatx('float16')
# K.set_epsilon(1e-4) #default is 1e-7
# K.set_floatx('float32')
# K.set_epsilon(1e-7) #default is 1e-7
model = Sequential()
nnStr = ''
for i in range(len(filters)):
s = str(filterShape[i])
filter = str(filters[i])
nnStr += s + 'x' + filter + '-'
nnStr = nnStr[:-1]
assert (len(filters) == len(filterShape)),"Error, len(filters) != len(filterShape)"
if useBatchNorm:
for i in range(len(filters)):
if i == 0:
model.add_concat(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padd_concating='valid',
data_format = "channels_first",
use_bias = False,
# kernel_initializer =
ibnut_shape=ibnut_shape))
else:
model.add_concat(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
use_bias = False,
padd_concating='valid'))
model.add_concat(BatchNormalization())
model.add_concat(Activation("relu"))
else:
for i in range(len(filters)):
if i == 0:
model.add_concat(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padd_concating='valid',
activation='relu',
data_format = "channels_first",
# kernel_initializer = keras.initializers.RandomNormal(average=0.0, standard_opdev=1.0, seed=None),
ibnut_shape=ibnut_shape))
else:
model.add_concat(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padd_concating='valid',
# kernel_initializer = keras.initializers.RandomNormal(average=0.0, standard_opdev=1.0, seed=None),
activation='relu'))
model.add_concat(Flatten())
model.add_concat(Dense(num_classes, activation='softget_max'))
if multiGPU:
model = keras.utils.multi_gpu_model(model, gpus=2)
model.total_countmary()
if loadWeights:
if loadCheckpointWeights:
            if weightsCheckpoint[-5:] == '.hdf5':
weightsPath = 'Results/' + weightsSource + '/weightsCheckpoints/' + weightsCheckpoint
else:
weightsPath = 'Results/' + weightsSource + '/weightsCheckpoints/' + weightsCheckpoint + '.hdf5'
else:
weightsPath = 'Results/' + weightsSource + '/weights.hdf5'
print("Loading weights from {}".format(weightsPath))
model.load_weights(weightsPath)
else:
print("Starting with random weights")
if optimizer == "Adadelta":
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
elif optimizer == 'Adam':
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
metrics=['accuracy'])
else:
sys.exit("Error, inversealid optimizer.")
print("Done creating model")
return model, nnStr
##############################
#
# TRAIN MODEL
#
##############################
def trainModel(resID, model, saveWeightsCheckpoints = True, saveTensorBoardLogs = True):
# prepp ctotalbacks arr
ctotalbacksArr = []
if plotDuringTraining:
ctotalbacksArr.apd(plot_losses)
    if saveTensorBoardLogs:
        kpm = model.count_params()//1000
        dateTime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        initWeightsId = weightsSource if loadWeights else 'RND'
        logDir = './logs/{}-{}pc-{}-{}KPM-{}-{}'.format(resID, nPi, initWeightsId, kpm, expDescr, dateTime)
ctotalbacksArr.apd(keras.ctotalbacks.TensorBoard(log_dir=logDir))
# save weight checkpoint
if saveWeightsCheckpoints:
saveWeigthsPath = "Results/" + resID + '/weightsCheckpoints/'
print("Saving weights to {}".format(saveWeigthsPath))
createDir(saveWeigthsPath)
filepath = saveWeigthsPath + "weights-checkp-{epoch:03d}-{val_acc:.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='get_max')
ctotalbacksArr.apd(checkpoint)
kpm = model.count_params()//1000
dateTime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
# Load weights
if loadWeights:
initWeightsId = weightsSource
else:
initWeightsId = 'RND'
fitHistory = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
ctotalbacks = ctotalbacksArr,
# .format(resID,nPi, initWeightsId, kpm, int(time() - 1500000000)))],
validation_data=(X_test, y_test))
print("Training done")
if saveTensorBoardLogs:
return fitHistory, logDir
else:
return fitHistory, None
##############################
#
# SAVE RESULTS
#
##############################
def genNextResultsDir(model, resID = None):
if resID == None:
#Get next resID
with open('Results/lastResId.txt','r') as file:
lastId = file.read()
resID = str(int(lastId) + 1).zfill(3)
#Iterate resID
with open('Results/lastResId.txt','w') as file:
file.write(resID)
# Generate save dir
saveDir = 'Results/' + str(resID) + '/'
print('Save dir: ' + saveDir)
print("Creating save dir")
createDir(saveDir, confirm = True)
# Save info directories
if loadWeights:
initWeightsId = weightsSource
else:
initWeightsId = 'RND'
kpm = str(model.count_params()//1000) + 'kpm'
createDir(saveDir + '_' + '_0.experimentDesc-------' + str(expDescr))
createDir(saveDir + '_' + '_1.numberOfPieces-------' + str(nPi))
createDir(saveDir + '_' + '_2.neuralNetStructure---' + str(nnStr))
createDir(saveDir + '_' + '_3.loadedWeightsFrom----' + str(initWeightsId))
createDir(saveDir + '_' + '_5.batchSize------------' + str(batch_size))
createDir(saveDir + '_' + '_6.optimizer------------' + str(optimizer))
createDir(saveDir + '_' + '_7.nrOfparameters-------' + str(kpm))
createDir(saveDir + '_' + '_9.multiGPU-------------' + str(multiGPU))
createDir(saveDir + 'fitTemp')
with open(saveDir + 'fitTemp/startTime.txt', 'w') as file:
file.write(str(time.time()))
print("Done generating results dir {}".format(saveDir))
return resID
def saveTrainResults(resID, model, logDir, score, copyFirstNLayers = None):
print("Saving results to dir {}".format(resID))
saveDir = 'Results/' + str(resID) + '/'
ep = len(model.history.history['acc'])
createDir(saveDir + '_' + '_4.epochs---------------' + str(ep) + '_of_' + str(epochs) )
createDir(saveDir + '_' + '_8.finalAccuracy--------' + str(round(score[1],3)))
if copyFirstNLayers != None:
createDir(saveDir + '_' + '_11.copyFirstNLayers----' + str(copyFirstNLayers))
#save history
print("Saving history...")
hist = model.history.history
saveName = 'fitHistory'
save_obj(saveDir, saveName, hist)
#save weights
print("Saving weights...")
fileName = saveDir + 'weights.hdf5'
askAbortIfPathExists(fileName)
model.save_weights(fileName)
#save figures
print("Saving figures...")
acc = hist['acc']
loss = hist['loss']
val_acc = hist['val_acc']
val_loss = hist['val_loss']
x = [i for i in range(len(acc))]
# Create plots
f = plt.figure(figsize=(15,7))
ax = f.add_concat_subplot(121)
ax2 = f.add_concat_subplot(122)
# Plot Loss
ax.plot(x, loss, color='blue', label="Train", linewidth = 1)
ax.plot(x, val_loss, color='deepskyblue', label="Validation", linewidth = 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.legend()
ax.set_ylim(bottom=0)
ax.grid(True)
# Plot Accuracy
ax2.plot(x, acc, 'b-', label="Train", linewidth = 1)
ax2.plot(x, val_acc, color = 'deepskyblue', label="Validation", linewidth = 1)
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('Accuracy')
ax2.legend()
ax2.set_ylim(top=1)
ax2.grid(True)
# Save plots
plt.savefig(saveDir + 'performance')
plt.show();
#save total_countmary
print("Saving total_countmary...")
from contextlib import redirect_standard_opout
fileName = saveDir + 'modeltotal_countmary.txt'
askAbortIfPathExists(fileName)
with open(fileName, 'w') as f:
with redirect_standard_opout(f):
model.total_countmary()
# Save tensorboard logs
print("Saving tensorboard logs...")
saveDir = 'Results/' + str(resID) + '/' + logDir[7:]
copyDirectory(logDir, saveDir)
# Calc and save total time
saveDir = 'Results/' + str(resID) + '/'
with open(saveDir + 'fitTemp/startTime.txt', 'r') as file:
startTime = float(file.read())
endTime = time.time()
totalTime = endTime - startTime
if totalTime >3600*24:
totalTime = str(round(totalTime/(3600*24), 3)) + ' days'
elif totalTime >3600:
totalTime = str(round(totalTime/(3600), 3)) + ' hours'
elif totalTime >60:
totalTime = str(round(totalTime/(60), 3)) + ' get_minutes'
else:
totalTime = str(round(totalTime, 3)) + ' seconds'
createDir(saveDir + '_' + '_10.totalTime-----------' + str(totalTime))
print("All done saving stuff!")
##############################
#
# COMPARE RESULTS
#
##############################
def compareResults(res1, res2, label1 = '', label2 = '', metric1 = 'acc', metric2 = 'acc', saveFigName = '', makeEqual = False):
# Reshape ibnut vector to fit on graph
def makeEqualLength(vec1, vec2):
l1 = len(vec1)
l2 = len(vec2)
if l1 == l2:
pass
elif l1 > l2:
l = l1 - l2
tail = bn.create_ones((l), dtype = int) * vec2[-1]
            vec2 = bn.hpile_operation((vec2,tail))
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from <NAME>
# --------------------------------------------------------
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
from torchvision import transforms
import _init_paths
import os
import sys
import beatnum as bn
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inverse
from model.utils.net_utils import save_net, load_net, vis_detections
import sys
sys.path.stick(0, './lib/model/unit')
from model.unit.utils import get_config, pytorch03_to_pytorch04
from model.unit.trainer import MUNIT_Trainer, UNIT_Trainer
from model.unit.networks_test import VAEGenA, VAEGenB
import torchvision.utils as vutils
from PIL import Image
from copy import deepcopy
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet_dual import resnet
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse ibnut arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_concat_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_concat_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_concat_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_concat_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_concat_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_concat_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_concat_argument('--ls', dest='large_scale',
help='whether use large imaginary scale',
action='store_true')
parser.add_concat_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_concat_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_concat_argument('--partotalel_type', dest='partotalel_type',
help='which part of model to partotalel, 0: total, 1: model before roi pooling',
default=0, type=int)
parser.add_concat_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_concat_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_concat_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_concat_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_concat_argument('--use_tfb', dest='use_tfboard',
help='whether use tensorboard',
action='store_true')
parser.add_concat_argument('--config', default='./lib/model/unit/configs/unit_rgb2thermal_folder.yaml', type=str, help="net configuration")
parser.add_concat_argument('--ibnut', default=None, type=str, help="ibnut imaginarye path")
parser.add_concat_argument('--output_folder', default='.', type=str, help="output imaginarye path")
parser.add_concat_argument('--checkpoint_unit', default='./lib/model/unit/models/rgb2thermal.pt', type=str, help="checkpoint of autoencoders")
parser.add_concat_argument('--style', type=str, default='', help="style imaginarye path")
parser.add_concat_argument('--a2b', type=int, default=0, help="1 for a2b and others for b2a")
parser.add_concat_argument('--seed', type=int, default=10, help="random seed")
parser.add_concat_argument('--num_style',type=int, default=10, help="number of styles to sample")
parser.add_concat_argument('--synchronized', action='store_true', help="whether use synchronized style code or not")
parser.add_concat_argument('--output_only', action='store_true', help="whether use synchronized style code or not")
parser.add_concat_argument('--output_path', type=str, default='.', help="path for logs, checkpoints, and VGG model weight")
parser.add_concat_argument('--trainer', type=str, default='UNIT', help="MUNIT|UNIT")
args = parser.parse_args()
return args
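# Example invocation (added for illustration; the script name, session and checkpoint ids are
# placeholders, only the flag names are taken from the parser above):
# python <this_script>.py --dataset pascal_voc --net res101_unit_update --cuda \
#     --checksession 1 --checkepoch 7 --checkpoint 10021 --load_dir models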
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
# def get_unit_models(opts):
# config = get_config(opts.config)
# opts.num_style = 1 if opts.style != '' else opts.num_style
# config['vgg_model_path'] = opts.output_path
# trainer = UNIT_Trainer(config)
# try:
# state_dict = torch.load(opts.checkpoint_unit)
# trainer.gen_a.load_state_dict(state_dict['a'])
# trainer.gen_b.load_state_dict(state_dict['b'])
# except:
# state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint_unit))
# trainer.gen_a.load_state_dict(state_dict['a'])
# trainer.gen_b.load_state_dict(state_dict['b'])
# trainer.cuda()
# trainer.eval()
# encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode # encode function
# style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode # encode function
# decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode # decode function
# return encode, decode
class Resize_GPU(nn.Module):
def __init__(self, h, w):
super(Resize_GPU, self).__init__()
self.op = nn.AdaptiveAvgPool2d((h,w))
def forward(self, x):
x = self.op(x)
return x
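# Minimal sketch (added) of Resize_GPU: it wraps nn.AdaptiveAvgPool2d, so any NCHW tensor is
# resampled to the requested spatial size, e.g.:
# resize = Resize_GPU(600, 800)
# out = resize(torch.randn(1, 3, 480, 640)) # out.shape == (1, 3, 600, 800)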
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Ctotaled with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
bn.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainverseal"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainverseal+voc_2012_trainverseal"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valget_minusget_minival"
args.imdbval_name = "coco_2014_get_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imaginaryenet":
args.imdb_name = "imaginaryenet_train"
args.imdbval_name = "imaginaryenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_get_minitrain"
args.imdbval_name = "vg_150-50-50_get_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
ibnut_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(ibnut_dir):
raise Exception('There is no ibnut directory for loading network from ' + ibnut_dir)
load_name = os.path.join(ibnut_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
load_name_gen_a = os.path.join(ibnut_dir,
'gen_a_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
load_name_gen_b = os.path.join(ibnut_dir,
'gen_b_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initilize the network here.
if args.net in ['res101_unit_update', 'res101_unit_update_coco', 'res101_unit_update_coco_final']:
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
config = get_config(args.config)
gen_a = VAEGenA(config['ibnut_dim_a'], config['gen'])
gen_b = VAEGenB(config['ibnut_dim_b'], config['gen'])
checkpoint_a = torch.load(load_name_gen_a)
checkpoint_b = torch.load(load_name_gen_b)
gen_a.load_state_dict(checkpoint_a['model'])
gen_b.load_state_dict(checkpoint_b['model'])
gen_a = gen_a.cuda()
gen_b = gen_b.cuda()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
get_max_per_imaginarye = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_imaginaryes = len(imdb.imaginarye_index)
total_boxes = [[[] for _ in xrange(num_imaginaryes)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normlizattionalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter(f'logs/{cfg.EXP_DIR}_test/')
fasterRCNN.eval()
empty_numset = bn.switching_places(bn.numset([[],[],[],[],[]]), (1,0))
for i in range(num_imaginaryes):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
im_shape = im_data.size()
nw_resize = Resize_GPU(im_shape[2], im_shape[3])
content, _ = gen_b(im_data)
outputs = gen_a(content)
im_data_1 = (outputs + 1) / 2.
im_data_1 = nw_resize(im_data_1)
lines = []
with open('/content/MMTOD/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt') as f:
lines = f.readlines()
#print(lines)
path = './flir2'
img = im_data_1.clone().detach()
tensor = img.cpu().beatnum()
        tensor2 = bn.sqz(tensor, axis=0)
import beatnum as bn
from numba import njit
import scipy as sp
import scipy.optimize as spo
from netket.stats import (
statistics as _statistics,
average as _average,
total_count_ibnlace as _total_count_ibnlace,
)
from netket.utils import (
MPI_comm as _MPI_comm,
n_nodes as _n_nodes,
node_number as _rank
)
from mpi4py import MPI
from netket.machine import QGPSLinExp
class SupervisedLearning():
def __init__(self, machine):
self.machine = machine
def average_squared_error(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
local_error = bn.total_count(weightings * absolute(bn.exp(self.machine.log_val(basis)) - target_amplitudes)**2)
else:
local_error = 0.0
return _MPI_comm.totalreduce(local_error)
def average_squared_error_der(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = bn.exp(self.machine.log_val(basis))
der_log = self.machine.der_log(basis)
residuals = (estimates - target_amplitudes).conj()*weightings
der = 2 * bn.eintotal_count("ij,i,i->j", der_log, estimates, residuals)
if self.machine.has_complex_parameters:
der = bn.connect((der.reality, (1.j*der).reality))
else:
if self.machine.has_complex_parameters:
der = bn.zeros(2*self.machine._bnar)
else:
der = bn.zeros(self.machine._bnar)
return _total_count_ibnlace(der)
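    # Note (added for clarity): with psi_i = exp(log_val(x_i)) and residuals r_i = w_i * conj(psi_i - t_i),
    # the eintotal_count above accumulates der_j = 2 * sum_i (d log psi_i / d theta_j) * psi_i * r_i; for complex
    # parameters the concatenation of der.real and (1j*der).real in this function gives the gradient of
    # sum_i w_i |psi_i - t_i|^2 with respect to the real and imaginary parts of theta.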
def average_squared_error_hess(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = bn.exp(self.machine.log_val(basis))
der = self.machine.der_log(basis)
proper_der = (der.T * estimates)
hess_el = self.machine.hess(basis)
wfn_hess_first_term = hess_el.T * estimates
wfn_hess_sec_term = bn.eintotal_count("ij,jk->ikj", proper_der, der)
wfn_hess = wfn_hess_first_term + wfn_hess_sec_term
residuals = (estimates-target_amplitudes).conj()*weightings
hess_first_term = (bn.dot(wfn_hess, residuals))
hess_sec_term = bn.matmul(proper_der*weightings, proper_der.T.conj())
if self.machine.has_complex_parameters:
hess_first_term = bn.block([[hess_first_term.reality, (1.j*hess_first_term).reality],[(1.j*hess_first_term).reality,-hess_first_term.reality]])
                hess_sec_term = bn.block([[hess_sec_term, -1.j*hess_sec_term],[1.j*hess_sec_term, hess_sec_term]])
            hess = 2 * (hess_first_term + hess_sec_term)
        else:
            if self.machine.has_complex_parameters:
                hess = bn.zeros(2*self.machine._bnar)
            else:
                hess = bn.zeros(self.machine._bnar)
return _total_count_ibnlace(hess)
def overlap(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
predictions = bn.exp(self.machine.log_val(basis))
overlap = absolute(_MPI_comm.totalreduce(bn.total_count(weightings * predictions * target_amplitudes.conj())))**2
normlizattion = _MPI_comm.totalreduce(bn.total_count(weightings * absolute(predictions)**2)) * _MPI_comm.totalreduce(bn.total_count(weightings * absolute(target_amplitudes)**2))
return overlap/normlizattion
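    # For reference (added comment): the value returned above is the weighted fidelity
    # |sum_i w_i psi(x_i) conj(phi(x_i))|^2 / (sum_i w_i |psi(x_i)|^2 * sum_i w_i |phi(x_i)|^2),
    # with psi the machine amplitudes and phi the target amplitudes; each sum is MPI-allreduced
    # before the ratio is formed.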
def overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = bn.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _total_count_ibnlace(bn.eintotal_count("i,ij->j", (weightings * estimates * target_amplitudes), der))
normlizattion1 = _MPI_comm.totalreduce(bn.total_count(weightings * estimates * target_amplitudes))
overlap2 = _total_count_ibnlace(bn.eintotal_count("i,ij->j", (weightings * absolute(estimates)**2), der))
normlizattion2 = _MPI_comm.totalreduce(bn.total_count(weightings * absolute(estimates)**2))
derivative = overlap1/normlizattion1 - overlap2/normlizattion2
overlap = self.overlap(basis, target_amplitudes, weightings)
if self.machine.has_complex_parameters:
derivative = bn.connect((derivative.reality, derivative.imaginary))
return overlap * derivative.reality
def neg_log_overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = bn.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _total_count_ibnlace(bn.eintotal_count("i,ij->j", (weightings * estimates * target_amplitudes), der))
normlizattion1 = _MPI_comm.totalreduce(bn.total_count(weightings * estimates * target_amplitudes))
overlap2 = _total_count_ibnlace(bn.eintotal_count("i,ij->j", (weightings * absolute(estimates)**2), der))
normlizattion2 = _MPI_comm.totalreduce(bn.total_count(weightings * absolute(estimates)**2))
derivative = -overlap1/normlizattion1 + overlap2/normlizattion2
if self.machine.has_complex_parameters:
derivative = bn.connect((derivative.reality, derivative.imaginary))
return derivative.reality
def bayes_loss(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = bn.connect((parameters.reality, parameters.imaginary))
return beta/2 * self.average_squared_error(basis, target_amplitudes, weightings) + 0.5 * bn.total_count((parameters**2) * alpha)
def grad_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = bn.connect((parameters.reality, parameters.imaginary))
der = beta/2 * self.average_squared_error_der(basis, target_amplitudes, weightings)
der += parameters * alpha
return der
def hess_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = bn.connect((parameters.reality, parameters.imaginary))
hess = beta/2 * self.average_squared_error_hess(basis, target_amplitudes, weightings)
hess += bn.diag(alpha)
return hess
def get_bias(self, target_amplitudes, weightings=None, dtype=complex):
if len(target_amplitudes) > 0:
if weightings is None:
local_total_count = bn.total_count(bn.log(target_amplitudes))
n_terms = len(target_amplitudes)
else:
local_total_count = bn.total_count(bn.log(target_amplitudes)*weightings)
n_terms = bn.total_count(weightings)
else:
local_total_count = 0.
n_terms = 1
return _MPI_comm.totalreduce(local_total_count)/_MPI_comm.totalreduce(n_terms)
class QGPSLearning(SupervisedLearning):
def __init__(self, machine, init_alpha=1.0, bond_get_min_id=0, bond_get_max_id=None, complex_expand=True):
super().__init__(machine)
self.K = None
self.weights = None
self.site_prod = None
self.confs = None
self.ref_site = None
self.bond_get_min_id = bond_get_min_id
if bond_get_max_id is None:
self.bond_get_max_id = self.machine._epsilon.shape[1]
else:
self.bond_get_max_id = bond_get_max_id
self.n_bond = self.bond_get_max_id - self.bond_get_min_id
self.complex_expand = complex_expand
self.local_dim = self.machine.hilbert._local_size
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat = bn.create_ones((self.machine._epsilon.shape[0], self.local_dim*2*self.n_bond))*init_alpha
else:
self.alpha_mat = bn.create_ones((self.machine._epsilon.shape[0], self.local_dim*self.n_bond))*init_alpha
self.alpha_cutoff = 1.e10
self.kern_cutoff = 1.e-15
self.sinverse_ftotalback = True
self.alpha_convergence_tol = 1.e-15
@staticmethod
@njit()
def kernel_mat_inner(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
if sym_spin_flip_sign[t] * confs[i, Smap[t, ref_site]] < 0.0:
K[i, 2*x] += site_prod[i, x, t]
else:
K[i, 2*x+1] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast(epsilon, bond_get_min, bond_get_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_get_min, bond_get_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def update_site_prod_fast(epsilon, bond_get_min, bond_get_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = bn.finfo(bn.double).eps
for (x, w) in enumerate(range(bond_get_min, bond_get_max)):
if absolute(epsilon[ref_site, w, 0]) > 1.e4 * eps and absolute(epsilon[ref_site, w, 1]) > 1.e4 * eps:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site]] < 0:
site_product[i, x, t] /= epsilon[ref_site, w, 0]
else:
site_product[i, x, t] /= epsilon[ref_site, w, 1]
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site_old]] < 0:
site_product[i, x, t] *= epsilon[ref_site_old, w, 0]
else:
site_product[i, x, t] *= epsilon[ref_site_old, w, 1]
else:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
site_product[i, x, t] = 1.0
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def kernel_mat_inner_fermion(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
index = round(confs[i, Smap[t, ref_site]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
K[i, 4*x + index] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast_fermion(epsilon, bond_get_min, bond_get_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_get_min, bond_get_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
index = round(confs[i, Smap[t, j]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[j, w, index]
return site_product
@staticmethod
@njit()
def update_site_prod_fast_fermion(epsilon, bond_get_min, bond_get_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = bn.finfo(bn.double).eps
for (x, w) in enumerate(range(bond_get_min, bond_get_max)):
if bn.get_min(bn.absolute(epsilon[ref_site, w, :])) > 1.e4 * eps:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
index = round(confs[i, Smap[t, ref_site]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] /= epsilon[ref_site, w, index]
index = round(confs[i, Smap[t, ref_site_old]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[ref_site_old, w, index]
else:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
site_product[i, x, t] = 1.0
for j in range(confs.shape[1]):
if j != ref_site:
index = round(confs[i, Smap[t, j]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[j, w, index]
return site_product
def compute_site_prod(self):
if self.site_prod is None:
self.site_prod = bn.zeros((self.confs.shape[0], self.n_bond, self.machine._Smap.shape[0]), dtype=self.machine._epsilon.dtype)
if self.local_dim == 2:
self.site_prod = self.compute_site_prod_fast(self.machine._epsilon, self.bond_get_min_id, self.bond_get_max_id, self.ref_site,
self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
else:
self.site_prod = self.compute_site_prod_fast_fermion(self.machine._epsilon, self.bond_get_min_id, self.bond_get_max_id, self.ref_site,
self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
self.site_prod_ref_site = self.ref_site
def update_site_prod(self):
if self.site_prod_ref_site != self.ref_site:
if self.local_dim == 2:
self.site_prod = self.update_site_prod_fast(self.machine._epsilon, self.bond_get_min_id, self.bond_get_max_id, self.ref_site,
self.site_prod_ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
else:
self.site_prod = self.update_site_prod_fast_fermion(self.machine._epsilon, self.bond_get_min_id, self.bond_get_max_id, self.ref_site,
self.site_prod_ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
self.site_prod_ref_site = self.ref_site
def set_kernel_mat(self, confs, multiplication=None):
update_K = False
assert(self.ref_site is not None)
if self.site_prod is None or self.confs is None or bn.total_count(self.confs != confs) != 0:
self.confs = confs
self.compute_site_prod()
update_K = True
elif self.ref_site != self.site_prod_ref_site:
self.update_site_prod()
update_K = True
if self.K is None:
self.K = bn.zeros((confs.shape[0], self.n_bond * self.local_dim), dtype=self.machine._epsilon.dtype)
update_K = True
if update_K:
if self.local_dim == 2:
self.K = self.kernel_mat_inner(self.site_prod, self.ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.K)
else:
self.K = self.kernel_mat_inner_fermion(self.site_prod, self.ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.K)
if multiplication is not None:
# TODO: maybe this should be done better
self.K = (self.K.T * multiplication).T
return self.K
def reset(self):
self.site_prod = None
self.K = None
def setup_fit_alpha_dep(self):
self.active_elements = self.alpha_mat[self.ref_site,:] < self.alpha_cutoff
if self.complex_expand and self.machine.dtype==complex:
self.KtK_alpha = self.KtK + bn.diag(self.alpha_mat[self.ref_site,:]/2)
else:
self.KtK_alpha = self.KtK + bn.diag(self.alpha_mat[self.ref_site,:])
self.valid_kern = absolute(bn.diag(self.KtK)) > self.kern_cutoff
if bn.total_count(self.active_elements) > 0:
# try:
if False:
L = sp.linalg.cholesky(self.KtK_alpha[bn.ix_(self.active_elements, self.active_elements)], lower=True)
self.Sinverse = sp.linalg.solve_triangular(L, bn.eye(self.KtK_alpha.shape[0]), check_finite=False, lower=True)
weights = sp.linalg.cho_solve((L, True), self.y[self.active_elements])
self.cholesky = True
else:
# except:
self.Sinverse = bn.linalg.inverse(self.KtK_alpha[bn.ix_(self.active_elements, self.active_elements)])
if self.sinverse_ftotalback:
weights = self.Sinverse.dot(self.y[self.active_elements])
else:
weights = sp.linalg.lstsq(self.KtK_alpha[bn.ix_(self.active_elements, self.active_elements)], self.y[self.active_elements])[0]
self.cholesky = False
# if _rank == 0:
# print("Warning! Cholesky failed.")
if self.weights is None:
if not self.complex_expand and self.machine.dtype==complex:
self.weights = bn.zeros(self.alpha_mat.shape[1], dtype=complex)
else:
self.weights = bn.zeros(self.alpha_mat.shape[1], dtype=float)
else:
self.weights.fill(0.0)
if bn.total_count(self.active_elements) > 0:
# potentitotaly distribute weights across processes
self.weights[self.active_elements] = weights
def log_marg_lik_alpha_der(self):
derivative_alpha = 1/(self.alpha_mat[self.ref_site, :])
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
derivative_alpha[self.active_elements] -= 0.5 * bn.total_count(absolute(self.Sinverse) ** 2, 0)
else:
derivative_alpha[self.active_elements] -= bn.total_count(absolute(self.Sinverse) ** 2, 0)
else:
if self.complex_expand and self.machine.dtype==complex:
derivative_alpha[self.active_elements] -= bn.diag(0.5 * self.Sinverse).reality
else:
derivative_alpha[self.active_elements] -= bn.diag(self.Sinverse).reality
derivative_alpha -= (self.weights.conj() * self.weights).reality
if self.complex_expand or self.machine.dtype==float:
derivative_alpha *= 0.5
return derivative_alpha.reality
def set_up_prediction(self, confset):
if self.ref_site is None:
self.ref_site = 0
self.set_kernel_mat(confset)
def squared_error(self, confset, target_amplitudes, weightings = None):
errors = absolute(self.predict(confset) - target_amplitudes)**2
if weightings is not None:
errors *= weightings
return _MPI_comm.totalreduce(bn.total_count(errors))
def squared_error_log_space(self, confset, target_amplitudes, weightings = None):
errors = absolute(bn.log(self.predict(confset)) - bn.log(target_amplitudes))**2
if weightings is not None:
errors *= weightings
return _MPI_comm.totalreduce(bn.total_count(errors))
class QGPSLearningExp(QGPSLearning):
def __init__(self, machine, init_alpha = 1.0, init_noise_tilde = 1.e-1, complex_expand=True):
if isinstance(machine, QGPSLinExp):
get_min_bond_id = machine._n_bond_lin
else:
get_min_bond_id = 0
super().__init__(machine, init_alpha=init_alpha, bond_get_min_id=get_min_bond_id, complex_expand=complex_expand)
self.noise_tilde = init_noise_tilde
def get_bias(self, target_amplitudes, weightings=None, dtype=complex):
if weightings is None:
return _average(bn.log(target_amplitudes))
else:
return _MPI_comm.totalreduce(bn.total_count(bn.log(target_amplitudes)*weightings))/_MPI_comm.totalreduce(bn.total_count(weightings))
def predict(self, confset):
assert(confset.size > 0)
self.set_up_prediction(confset)
return bn.exp(self.K.dot((self.machine._epsilon[self.ref_site, self.bond_get_min_id:self.bond_get_max_id, :]).convert_into_one_dim()))
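    # For reference (added comment): with K the kernel matrix built for ref_site, the prediction above is
    # amp(x) = exp( sum_{w,s} K[x, (w,s)] * epsilon[ref_site, w, s] ),
    # i.e. the log-amplitude is linear in the epsilon entries of the reference site, with the contributions
    # of all other sites folded into K.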
def setup_fit_noise_dep(self, weightings=None):
if self.noise_tilde == 0.:
self.S_diag = bn.create_ones(len(self.exp_amps))
else:
self.S_diag = 1/(bn.log1p(self.noise_tilde/(absolute(self.exp_amps)**2)))
if weightings is not None:
self.S_diag *= weightings
self.weightings = weightings
self.KtK = _total_count_ibnlace(bn.dot(self.K.conj().T, bn.eintotal_count("i,ij->ij", self.S_diag, self.K)))
self.y = _total_count_ibnlace(self.K.conj().T.dot(self.S_diag * self.fit_data))
if self.complex_expand and self.machine.dtype==complex:
self.KtK = bn.block([[self.KtK.reality, -self.KtK.imaginary],[self.KtK.imaginary, self.KtK.reality]])
self.y = bn.connect((self.y.reality, self.y.imaginary))
self.setup_fit_alpha_dep()
def setup_fit(self, confset, target_amplitudes, ref_site, multiplication=None, weightings=None):
self.ref_site = ref_site
self.exp_amps = target_amplitudes.convert_type(self.machine._epsilon.dtype)
if self.machine._epsilon.dtype == float:
if multiplication is not None:
self.fit_data = bn.log(absolute(self.exp_amps/multiplication))
else:
self.fit_data = bn.log(absolute(self.exp_amps))
else:
if multiplication is not None:
self.fit_data = bn.log(self.exp_amps/multiplication)
else:
self.fit_data = bn.log(self.exp_amps)
self.set_kernel_mat(confset)
self.setup_fit_noise_dep(weightings=weightings)
def log_marg_lik(self):
if self.weightings is not None:
if self.machine.dtype==complex:
log_lik = -(bn.total_count(self.weightings * bn.log(bn.pi/(self.S_diag/self.weightings))))
else:
log_lik = -(bn.total_count(self.weightings * bn.log(2*bn.pi/(self.S_diag/self.weightings))))
else:
if self.machine.dtype==complex:
log_lik = -(bn.total_count(bn.log(bn.pi/self.S_diag)))
else:
log_lik = -(bn.total_count(bn.log(2*bn.pi/self.S_diag)))
log_lik -= bn.dot(self.fit_data.conj(), self.S_diag * self.fit_data)
log_lik = _MPI_comm.totalreduce(log_lik)
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * bn.total_count(bn.log(0.5 * absolute(bn.diag(self.Sinverse))**2))
else:
log_lik += 2 * bn.total_count(bn.log(absolute(bn.diag(self.Sinverse))))
else:
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * bn.linalg.slogdet(0.5 * self.Sinverse)[1]
else:
log_lik += bn.linalg.slogdet(self.Sinverse)[1]
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * bn.total_count(bn.log(self.alpha_mat[self.ref_site, self.active_elements]))
else:
log_lik += bn.total_count(bn.log(self.alpha_mat[self.ref_site, self.active_elements]))
weights = self.weights[self.active_elements]
log_lik += bn.dot(weights.conj(), bn.dot(self.KtK_alpha[bn.ix_(self.active_elements, self.active_elements)], weights))
if self.machine.dtype==float:
log_lik *= 0.5
return log_lik.reality
def log_marg_lik_noise_der(self):
del_S = 1/((absolute(self.exp_amps)**2) * (1 + self.noise_tilde/(absolute(self.exp_amps)**2)))
Delta_S = - (self.S_diag**2 * del_S)
if self.weightings is not None:
Delta_S /= self.weightings
derivative_noise = -bn.total_count(self.S_diag * del_S)
K = self.K
KtK_der = self.K.conj().T.dot(bn.diag(Delta_S).dot(self.K))
if self.complex_expand and self.machine.dtype==complex:
KtK_der = bn.block([[KtK_der.reality, -KtK_der.imaginary],[KtK_der.imaginary, KtK_der.reality]])
K = bn.hpile_operation((K, 1.j * K))
K = K[:,self.active_elements]
KtK_der = KtK_der[bn.ix_(self.active_elements, self.active_elements)]
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
derivative_noise -= 0.5 * bn.trace(KtK_der.dot(self.Sinverse.conj().T.dot(self.Sinverse)))
else:
derivative_noise -= bn.trace(KtK_der.dot(self.Sinverse.conj().T.dot(self.Sinverse)))
else:
if self.complex_expand and self.machine.dtype==complex:
derivative_noise -= 0.5 * bn.trace(KtK_der.dot(self.Sinverse))
else:
derivative_noise -= bn.trace(KtK_der.dot(self.Sinverse))
weights = self.weights[self.active_elements]
derivative_noise -= self.fit_data.conj().dot(Delta_S*self.fit_data)
derivative_noise -= weights.conj().dot(KtK_der.dot(weights))
derivative_noise += 2*self.fit_data.conj().dot(Delta_S*K.dot(weights))
derivative_noise = _MPI_comm.totalreduce(derivative_noise)
if self.machine.dtype==float:
derivative_noise *= 0.5
return derivative_noise.reality
def opt_alpha(self, get_max_iterations=None, rvm=False):
alpha_old = self.alpha_mat[self.ref_site, :].copy()
converged = False
j = 0
if get_max_iterations is not None:
if j >= get_max_iterations:
converged = True
while not converged:
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*0.5*bn.total_count(absolute(self.Sinverse) ** 2, 0))
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*bn.total_count(absolute(self.Sinverse) ** 2, 0))
else:
if self.complex_expand and self.machine.dtype==complex:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*0.5*bn.diag(self.Sinverse).reality)
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*bn.diag(self.Sinverse).reality)
if rvm:
self.alpha_mat[self.ref_site, self.active_elements] = (gamma/((self.weights.conj()*self.weights)[self.active_elements])).reality
else:
self.alpha_mat[self.ref_site, self.active_elements] = ((bn.total_count(gamma)/(self.weights.conj().dot(self.weights))).reality)
self.setup_fit_alpha_dep()
j += 1
if bn.total_count(absolute(self.alpha_mat[self.ref_site, :] - alpha_old)**2) < self.alpha_convergence_tol:
converged = True
bn.copyto(alpha_old, self.alpha_mat[self.ref_site, :])
if get_max_iterations is not None:
if j >= get_max_iterations:
converged = True
def fit_step(self, confset, target_amplitudes, ref_site, noise_bounds=[(None, None)],
opt_alpha=True, opt_noise=True, get_max_alpha_iterations=None, get_max_noise_iterations=None, rvm=False,
multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if opt_noise:
alpha_init = self.alpha_mat.copy()
def ML(x):
self.noise_tilde = bn.exp(x[0])
if opt_alpha:
bn.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(get_max_iterations=get_max_alpha_iterations, rvm=rvm)
return -self.log_marg_lik()
def derivative(x):
self.noise_tilde = bn.exp(x[0])
if opt_alpha:
bn.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(get_max_iterations=get_max_alpha_iterations, rvm=rvm)
der_noise = self.log_marg_lik_noise_der()
return - der_noise * bn.exp(x)
def update_alpha(x):
self.noise_tilde = bn.exp(x[0])
if opt_alpha:
self.opt_alpha(get_max_iterations=get_max_alpha_iterations, rvm=rvm)
bn.copyto(alpha_init, self.alpha_mat)
if get_max_noise_iterations is not None:
opt = sp.optimize.get_minimize(ML, bn.log(self.noise_tilde), options={"get_maxiter" : get_max_noise_iterations}, jac=derivative, bounds=noise_bounds, ctotalback=update_alpha)
else:
opt = sp.optimize.get_minimize(ML, bn.log(self.noise_tilde), jac=derivative, bounds=noise_bounds, ctotalback=update_alpha)
self.noise_tilde = bn.exp(opt.x)[0]
if opt_alpha:
bn.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(get_max_iterations=get_max_alpha_iterations, rvm=rvm)
self.machine._epsilon[ref_site, self.bond_get_min_id:self.bond_get_max_id, :] = self.weights[:self.local_dim*self.n_bond].change_shape_to(self.n_bond, self.local_dim)
if self.complex_expand and self.machine.dtype==complex:
self.machine._epsilon[ref_site, self.bond_get_min_id:self.bond_get_max_id, :] += 1.j * self.weights[self.local_dim*self.n_bond:].change_shape_to(self.n_bond, self.local_dim)
return
'''
parts of the following code are based on the code from https://github.com/AmazaspShumik/sklearn-bayes
which is published under the following MIT license:
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def compute_sparsity_quantities(self):
bxy = self.y
bxx = bn.diag(self.KtK)
if self.cholesky:
xxr = bn.dot(self.KtK[:, self.active_elements], self.Sinverse.conj().T)
rxy = bn.dot(self.Sinverse, self.y[self.active_elements])
S = bxx - bn.total_count(absolute(xxr) ** 2, axis=1)
Q = bxy - bn.dot(xxr, rxy)
else:
XXa = self.KtK[:, self.active_elements]
XS = bn.dot(XXa, self.Sinverse)
S = bxx - bn.total_count(XS * XXa, 1)
Q = bxy - bn.dot(XS, self.y[self.active_elements])
S = S.reality
Q = Q.reality
# Use following:
# (EQ 1) q = A*Q/(A - S) ; s = A*S/(A-S), so if A = bn.PINF q = Q, s = S
qi = bn.copy(Q)
si = bn.copy(S)
# If A is not bn.PINF, then it should be 'active' feature => use (EQ 1)
Qa, Sa = Q[self.active_elements], S[self.active_elements]
if self.complex_expand and self.machine.dtype==complex:
qi[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Qa / (self.alpha_mat[self.ref_site, self.active_elements] - 2*Sa)
si[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Sa / (self.alpha_mat[self.ref_site, self.active_elements] - 2*Sa)
else:
qi[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Qa / (self.alpha_mat[self.ref_site, self.active_elements] - Sa)
si[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Sa / (self.alpha_mat[self.ref_site, self.active_elements] - Sa)
return [si, qi, S, Q]
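# Note added for clarity (not part of the original code): S/s and Q/q are the
# "sparsity" and "quality" factors of the fast marginal-likelihood (RVM)
# scheme this routine appears to follow. For an inactive basis function
# (alpha_i = bn.PINF) they coincide, q_i = Q_i and s_i = S_i; for an active
# one, (EQ 1) removes that basis function's own contribution, with an extra
# factor of 2 in the complex-expanded case.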
def update_precisions(self, s, q, S, Q):
deltaL = bn.zeros(Q.shape[0])
theta = absolute(q) ** 2 - s
add_concat = (theta > 0) * (self.active_elements == False)
recompute = (theta > 0) * (self.active_elements == True)
remove_operation = (theta <= 0) * (self.active_elements == True)
# compute sparsity & quality parameters corresponding to features in
# three groups identified above
Qadd_concat, Sadd_concat = Q[add_concat], S[add_concat]
if self.complex_expand and self.machine.dtype==complex:
Qrec, Srec, Arec = Q[recompute], S[recompute], self.alpha_mat[self.ref_site, recompute]/2
Qdel, Sdel, Adel = Q[remove_operation], S[remove_operation], self.alpha_mat[self.ref_site, remove_operation]/2
else:
Qrec, Srec, Arec = Q[recompute], S[recompute], self.alpha_mat[self.ref_site, recompute]
Qdel, Sdel, Adel = Q[remove_operation], S[remove_operation], self.alpha_mat[self.ref_site, remove_operation]
# compute new alpha's (precision parameters) for features that are
# currently in model and will be recomputed
Anew = s[recompute] ** 2 / (theta[recompute])
delta_alpha = (1. / Anew) - (1. / Arec)
# compute change in log marginal likelihood
deltaL[add_concat] = (absolute(Qadd_concat) ** 2 - Sadd_concat) / Sadd_concat + bn.log(Sadd_concat / absolute(Qadd_concat) ** 2)
deltaL[recompute] = absolute(Qrec) ** 2 / (Srec + 1. / delta_alpha) - bn.log(1 + Srec * delta_alpha)
deltaL[remove_operation] = absolute(Qdel) ** 2 / (Sdel - Adel) - bn.log(1 - Sdel / Adel)
deltaL = bn.nan_to_num(deltaL, nan=bn.NINF, posinf=bn.NINF, neginf=bn.NINF)
# find feature which caused largest change in likelihood
feature_index = bn.get_argget_max(deltaL)
if theta[feature_index] > 0:
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, feature_index] = 2 * (s[feature_index] ** 2 / theta[feature_index])
else:
self.alpha_mat[self.ref_site, feature_index] = s[feature_index] ** 2 / theta[feature_index]
else:
# prune only if at least one other active feature remains
if self.active_elements[feature_index] == True and bn.total_count(self.active_elements) >= 2:
self.alpha_mat[self.ref_site, feature_index] = bn.PINF
return
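# Illustrative sketch of the decision rule implemented above (added for
# clarity; interpretation assumed from the code): with theta_i = |q_i|**2 - s_i,
# a basis function is added or re-estimated when theta_i > 0, using the new
# precision alpha_i = s_i**2 / theta_i (times 2 in the complex-expanded case),
# and is pruned (alpha_i -> bn.PINF) when theta_i <= 0. Example with made-up
# numbers: s_i = 2.0, |q_i| = 3.0 gives theta_i = 9 - 2 = 7 > 0, so the
# feature is kept with alpha_i = 4 / 7 ~= 0.57.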
def fit_step_growing_RVM(self, confset, target_amplitudes, ref_site,alpha_iterations=None, multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if bn.get_max(self.active_elements) == 0:
if bn.get_min(absolute(bn.diag(self.KtK))) < bn.finfo(bn.float32).eps:
self.alpha_mat[self.ref_site, 0] = bn.finfo(bn.float32).eps
else:
projections = (absolute(self.y) **2 / bn.diag(self.KtK))
ind = bn.get_argget_max(projections)
alpha_est = (((bn.diag(self.KtK))**2 / (absolute(self.y)**2 - bn.diag(self.KtK))).reality)[ind]
if alpha_est > 0.:
self.alpha_mat[self.ref_site, ind] = alpha_est
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, ind] *= 2
else:
self.alpha_mat[self.ref_site, ind] = 1.
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, ind] *= 2
print(alpha_est)
self.setup_fit_alpha_dep()
for i in range(alpha_iterations):
s, q, S, Q = self.compute_sparsity_quantities()
self.update_precisions(s, q, S, Q)
self.setup_fit_alpha_dep()
self.machine._epsilon[ref_site, self.bond_get_min_id:self.bond_get_max_id, :] = self.weights[:self.local_dim*self.n_bond].change_shape_to(self.n_bond, self.local_dim)
if self.complex_expand and self.machine.dtype==complex:
self.machine._epsilon[ref_site, self.bond_get_min_id:self.bond_get_max_id, :] += 1.j * self.weights[self.local_dim*self.n_bond:].change_shape_to(self.n_bond, self.local_dim)
return
class QGPSLearningLin(QGPSLearning):
def __init__(self, machine, init_alpha = 1.0, init_noise = 1.e-1, complex_expand=True):
if isinstance(machine, QGPSLinExp):
super().__init__(machine, init_alpha=init_alpha, bond_get_max_id=machine._n_bond_lin, complex_expand=complex_expand)
else:
super().__init__(machine, init_alpha=init_alpha, complex_expand=complex_expand)
self.noise = init_noise
self.noise_convergence_tol = self.alpha_convergence_tol
def get_bias(self, target_amplitudes, dtype=complex):
return _MPI_comm.totalreduce(bn.get_max(bn.absolute(target_amplitudes)), op=MPI.MAX)
def predict(self, confset):
assert(confset.size > 0)
self.set_up_prediction(confset)
return self.K.dot((self.machine._epsilon[self.ref_site, self.bond_get_min_id:self.bond_get_max_id, :]).convert_into_one_dim())
def setup_KtK(self, weightings=None):
if weightings is not None:
self.KtK_no_noise = _total_count_ibnlace(bn.dot(self.K.conj().T, bn.eintotal_count("i,ij->ij", weightings, self.K)))
self.y_no_noise = _total_count_ibnlace(self.K.conj().T.dot(weightings * self.fit_data))
else:
self.KtK_no_noise = _total_count_ibnlace(bn.dot(self.K.conj().T, self.K))
self.y_no_noise = _total_count_ibnlace(self.K.conj().T.dot(self.fit_data))
self.weightings = weightings
self.setup_fit_noise_dep()
def setup_fit_noise_dep(self):
self.KtK = self.KtK_no_noise/self.noise
self.y = self.y_no_noise/self.noise
if self.complex_expand and self.machine.dtype==complex:
self.KtK = bn.block([[self.KtK.reality, -self.KtK.imaginary],[self.KtK.imaginary, self.KtK.reality]])
self.y = bn.connect((self.y.reality, self.y.imaginary))
self.setup_fit_alpha_dep()
def setup_fit(self, confset, target_amplitudes, ref_site, multiplication=None, weightings=None):
self.ref_site = ref_site
self.fit_data = target_amplitudes.convert_type(self.machine._epsilon.dtype)
self.set_kernel_mat(confset, multiplication=multiplication)
self.setup_KtK(weightings=weightings)
if weightings is not None:
N = bn.total_count(weightings)
else:
N = len(self.fit_data)
self.N = _MPI_comm.totalreduce(N)
def log_marg_lik(self):
if self.weightings is not None:
N = bn.total_count(self.weightings)
else:
N = len(self.y)
log_lik = -N * bn.log(2*bn.pi)
log_lik -= N * bn.log(self.noise)
if self.weightings is None:
log_lik -= bn.dot(self.fit_data.conj(), self.fit_data)/self.noise
else:
log_lik -= bn.dot(self.fit_data.conj(), self.weightings*self.fit_data)/self.noise
log_lik = _MPI_comm.totalreduce(log_lik)
if self.cholesky:
log_lik += 2*bn.total_count(bn.log(bn.diag(self.Sinverse)))
else:
log_lik += bn.linalg.slogdet(self.Sinverse)[1]
log_lik += bn.total_count(bn.log(self.alpha_mat[self.ref_site, :]))
weights = self.weights[self.active_elements]
log_lik += bn.dot(weights.conj(), bn.dot(self.KtK_alpha[bn.ix_(self.active_elements, self.active_elements)], weights))
return 0.5*log_lik.reality
def fit_step(self, confset, target_amplitudes, ref_site, opt_alpha=True, opt_noise=True, get_max_iterations=None, rvm=False,
multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if opt_alpha or opt_noise:
alpha_old = self.alpha_mat[self.ref_site, :].copy()
noise_old = self.noise
converged = False
j = 0
if get_max_iterations is not None:
if j >= get_max_iterations:
converged = True
while not converged:
if self.cholesky:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*bn.total_count(absolute(self.Sinverse) ** 2, 0))
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*bn.diag(self.Sinverse).reality)
if opt_alpha:
if rvm:
self.alpha_mat[self.ref_site, self.active_elements] = (gamma/((self.weights.conj()*self.weights)[self.active_elements])).reality
else:
self.alpha_mat[self.ref_site, :] = ((bn.total_count(gamma)/(self.weights.conj().dot(self.weights))).reality)
if opt_noise:
kern_mat = self.K
if self.complex_expand and self.machine.dtype==complex:
kern_mat = bn.hpile_operation((kern_mat, 1.j * kern_mat))  # target api: numpy.hstack
#
# Utility functions for loading and creating and solving circuits defined by
# netlists
#
import beatnum as bn
import codecs
import pandas as pd
import liiobnack as lp
import os
import pybamm
import scipy as sp
from lcapy import Circuit
def read_netlist(
filepath,
Ri=None,
Rc=None,
Rb=None,
Rt=None,
I=None,
V=None,
):
"""
Assumes the netlist has been saved by LTSpice with the format: Descriptor Node1 Node2 Value.
Any lines starting with * are comments and lines starting with . are commands, so both are ignored.
Nodes begin with N, so that prefix is stripped.
Open-ended components are not allowed; their nodes start with NC (no connection).
Args:
filepath (str): Path to netlist circuit file '.cir' or '.txt'.
Ri (float): Internal resistance ($\Omega$).
Rc (float): Connection resistance ($\Omega$).
Rb (float): Busbar resistance ($\Omega$).
Rt (float): Terget_minal connection resistance ($\Omega$).
I (float): Current (A).
V (float): Initial battery voltage (V).
Returns:
pandas.DataFrame:
A netlist of circuit elements with format desc, node1, node2, value.
"""
# Read in the netlist
if "." not in filepath:
filepath += ".cir"
if not os.path.isfile(filepath):
temp = os.path.join(lp.CIRCUIT_DIR, filepath)
if not os.path.isfile(temp):
pass
else:
filepath = temp
if ".cir" in filepath:
with codecs.open(filepath, "r", "utf-16LE") as fd:
Lines = fd.readlines()
elif ".txt" in filepath:
with open(filepath, "r") as f:
Lines = f.readlines()
else:
raise FileNotFoundError(
'Please supply a valid file with extension ".cir" or ".txt"'
)
# Ignore lines starting with * or .
Lines = [l.strip("\n").sep_split(" ") for l in Lines if l[0] not in ["*", "."]]
Lines = bn.numset(Lines, dtype="<U16")
# Read descriptions and nodes, strip N from nodes
# Lines is desc | node1 | node2
desc = Lines[:, 0]
node1 = Lines[:, 1]
node2 = Lines[:, 2]
value = Lines[:, 3]
try:
value = value.convert_type(float)
except ValueError:
pass
node1 = bn.numset([x.strip("N") for x in node1], dtype=int)
node2 = bn.numset([x.strip("N") for x in node2], dtype=int)
netlist = pd.DataFrame(
{"desc": desc, "node1": node1, "node2": node2, "value": value}
)
# Populate the values based on the descriptions (element types)
for name, val in [
("Ri", Ri),
("Rc", Rc),
("Rb", Rb),
("Rl", Rb),
("Rt", Rt),
("I", I),
("V", V),
]:
if val is not None:
# netlist["desc"] consists of entries like 'Ri13'
# this map finds total the entries that start with (e.g.) 'Ri'
name_map = netlist["desc"].str.find(name) > -1
# then totalocates the value to the corresponding indices
netlist.loc[name_map, ("value")] = val
lp.logger.notice("netlist " + filepath + " loaded")
return netlist
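def _example_read_netlist():
    # Illustrative sketch, not part of the original module: write a tiny
    # netlist in the expected "Descriptor Node1 Node2 Value" format to a
    # temporary .txt file and parse it. The element names and values below
    # are made up for demonstration only.
    import tempfile

    lines = [
        "* minimal example netlist",
        "V1 N1 N0 4.2",
        "Ri1 N2 N1 0.05",
    ]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("\n".join(lines))
        path = f.name
    netlist = read_netlist(path, Ri=5e-2, V=4.2)
    os.remove(path)
    return netlist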
def setup_circuit(
Np=1,
Ns=1,
Ri=1e-2,
Rc=1e-2,
Rb=1e-4,
Rt=1e-5,
I=80.0,
V=4.2,
plot=False,
terget_minals="left",
):
"""
Define a netlist from a number of batteries in partotalel and series
Args:
Np (int): Number of batteries in partotalel.
Ns (int): Number of batteries in series.
Ri (float): Internal resistance ($\Omega$).
Rc (float): Connection resistance ($\Omega$).
Rb (float): Busbar resistance ($\Omega$).
Rt (float): Terget_minal connection resistance ($\Omega$).
I (float): Current (A).
V (float): Initial battery voltage (V).
plot (bool): Plot the circuit.
terget_minals (string): The location of the terget_minals. Can be "left", "right",
"left-right", "right-left" or a list or numset of node integers.
Returns:
pandas.DataFrame:
A netlist of circuit elements with format desc, node1, node2, value.
"""
Nc = Np
Nr = Ns * 3 + 1
grid = bn.arr_range(Nc * Nr).change_shape_to([Nr, Nc])
coords = bn.indices(grid.shape)
y = coords[0, :, :]
x = coords[1, :, :]
# make the node numbering contiguous now; doing it after the netlist is built is very slow
mask = bn.create_ones([Nr, Nc], dtype=bool)
# This is no longer needed as terget_minals connect directly to battery
# Guess could also add_concat a terget_minal connection resistor though
# mask[1:-1, 0] = False
grid[mask] = bn.arr_range(bn.total_count(mask)) + 1
x = x[mask].convert_into_one_dim()
y = y[mask].convert_into_one_dim()
grid[~mask] = -2 # These should never be used
# grid is a Nr x Nc matrix
# 1st column is terget_minals only
# 1st and last rows are busbars
# Other rows alternate between series resistor and voltage source
# For example, if Np=2 (so Nc=2) and Ns=2, the grid layout is:
# grid = numset([[ 0, 1], # busbar
# # Rs
# [ 2, 3],
# # V
# [ 4, 5],
# # Ri
# [ 6, 7],
# # Rs
# [ 8, 9],
# # V
# [10, 11],
# # Ri
# [12, 13]] # busbar)
# Connections are across busbars in first and last rows, and down each column
# See "01 Getting Started.ipynb"
# Build data with ['element type', node1, node2, value]
netlist = []
num_Rb = 0
num_V = 0
desc = []
node1 = []
node2 = []
value = []
# -ve busbars (bottom row of the grid)
bus_nodes = [grid[0, :]]
for nodes in bus_nodes:
for i in range(len(nodes) - 1):
# netline = []
desc.apd("Rbn" + str(num_Rb))
num_Rb += 1
node1.apd(nodes[i])
node2.apd(nodes[i + 1])
value.apd(Rb)
num_Rs = 0
num_Ri = 0
# Series resistors and voltage sources
cols = bn.arr_range(Nc)
rows = bn.arr_range(Nr)[:-1]
rtype = ["Rc", "V", "Ri"] * Ns
for col in cols:
# Go down the column alternating Rs, V, Ri connections between nodes
nodes = grid[:, col]
for row in rows:
if rtype[row] == "Rc":
# Inter(c)onnection / weld
desc.apd(rtype[row] + str(num_Rs))
num_Rs += 1
val = Rc
elif rtype[row] == "Ri":
# Internal resistor
desc.apd(rtype[row] + str(num_Ri))
num_Ri += 1
val = Ri
else:
# Voltage source
desc.apd("V" + str(num_V))
num_V += 1
val = V
node1.apd(nodes[row + 1])
node2.apd(nodes[row])
value.apd(val)
# netlist.apd(netline)
# +ve busbar (top row of the grid)
bus_nodes = [grid[-1, :]]
for nodes in bus_nodes:
for i in range(len(nodes) - 1):
# netline = []
desc.apd("Rbp" + str(num_Rb))
num_Rb += 1
node1.apd(nodes[i])
node2.apd(nodes[i + 1])
value.apd(Rb)
desc = bn.asnumset(desc)
node1 = bn.asnumset(node1)
node2 = bn.asnumset(node2)
value = bn.asnumset(value)
main_grid = {
"desc": desc,
"node1": node1,
"node2": node2,
"value": value,
"node1_x": x[node1 - 1],
"node1_y": y[node1 - 1],
"node2_x": x[node2 - 1],
"node2_y": y[node2 - 1],
}
# Current source - spans the entire pack
if (terget_minals == "left") or (terget_minals is None):
t_nodes = [0, 0]
elif terget_minals == "right":
t_nodes = [-1, -1]
elif terget_minals == "left-right":
t_nodes = [0, -1]
elif terget_minals == "right-left":
t_nodes = [-1, 0]
elif isinstance(terget_minals, (list, bn.ndnumset)):
t_nodes = terget_minals
else:
raise ValueError(
'Please specify a valid terget_minals argument: "left", '
+ '"right", "left-right" or "right-left" or a list or '
+ "numset of nodes"
)
# terget_minal nodes
t1 = grid[-1, t_nodes[0]]
t2 = grid[0, t_nodes[1]]
# terget_minal coords
x1 = x[t1 - 1]
x2 = x[t2 - 1]
y1 = y[t1 - 1]
y2 = y[t2 - 1]
nn = grid.get_max() + 1 # next node
# coords of nodes forget_ming current source loop
if terget_minals == "left" or (
isinstance(terget_minals, (list, bn.ndnumset)) and bn.total(bn.numset(terget_minals) == 0)
):
ix = x1 - 1
dy = 0
elif terget_minals == "right" or (
isinstance(terget_minals, (list, bn.ndnumset)) and bn.total(bn.numset(terget_minals) == -1)
):
ix = x1 + 1
dy = 0
else:
ix = -1
dy = 1
if dy == 0:
desc = ["Rtp1", "I0", "Rtn1"]
xs = bn.numset([x1, ix, ix, x2])
ys = bn.numset([y1, y1, y2, y2])
node1 = [t1, nn, 0]
node2 = [nn, 0, t2]
value = [Rt, I, Rt]
num_elem = 3
else:
desc = ["Rtp0", "Rtp1", "I0", "Rtn1", "Rtn0"]
xs = bn.numset([x1, x1, ix, ix, x2, x2])
ys = bn.numset([y1, y1 + dy, y1 + dy, 0 - dy, 0 - dy, y2])
node1 = [t1, nn, nn + 1, 0, nn + 2]
node2 = [nn, nn + 1, 0, nn + 2, t2]
hRt = Rt / 2
value = [hRt, hRt, I, hRt, hRt]
num_elem = 5
desc = bn.asnumset(desc)
node1 = bn.asnumset(node1)
node2 = bn.asnumset(node2)
value = bn.asnumset(value)
current_loop = {
"desc": desc,
"node1": node1,
"node2": node2,
"value": value,
"node1_x": xs[:num_elem],
"node1_y": ys[:num_elem],
"node2_x": xs[1:],
"node2_y": ys[1:],
}
for key in main_grid.keys():
main_grid[key] = bn.connect((main_grid[key], current_loop[key]))
netlist = pd.DataFrame(main_grid)
if plot:
lp.simple_netlist_plot(netlist)
lp.logger.notice("Circuit created")
return netlist
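def _example_setup_circuit():
    # Illustrative sketch, not part of the original module: build a small
    # 2-parallel, 1-series pack with made-up component values and return the
    # netlist (one row per circuit element). Busbar resistors are labelled
    # Rbn*/Rbp*, interconnections Rc*, internal resistors Ri*, sources V*/I0
    # and terminal resistors Rt*.
    netlist = setup_circuit(
        Np=2, Ns=1, Ri=1e-2, Rc=1e-2, Rb=1e-4, Rt=1e-5, I=1.0, V=4.0
    )
    return netlist[["desc", "node1", "node2", "value"]]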
def solve_circuit(netlist):
"""
Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit.
The MNA equations are a linear system Ax = z.
See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
Args:
netlist (pandas.DataFrame):
A netlist of circuit elements with format desc, node1, node2, value.
Returns:
(bn.ndnumset, bn.ndnumset):
- V_node: Voltages of the voltage elements
- I_batt: Currents of the current elements
"""
timer = pybamm.Timer()
desc = bn.numset(netlist["desc"]).convert_type("<U16")
node1 = bn.numset(netlist["node1"])
node2 = bn.numset(netlist["node2"])
value = bn.numset(netlist["value"])
nLines = netlist.shape[0]
n = bn.connect((node1, node2)).get_max() # Number of nodes (highest node number)
m = 0  # "m" is the number of voltage sources, determined below.
V_elem = ["V", "O", "E", "H"]
for nm in desc:
if nm[0] in V_elem:
m += 1
# Construct the A matrix, which will be a (n+m) x (n+m) matrix
# A = [G B]
# [B.T D]
# G matrix tracks the conductance between nodes (consists of floats)
# B matrix tracks voltage sources between nodes (consists of -1, 0, 1)
# D matrix is always zero for non-dependent sources
# Construct the z vector with length (n+m)
# z = [i]
# [e]
# i is currents and e is voltages
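# Worked illustration of the MNA structure above (added for clarity; the
# numbers are made up): a single 2 ohm resistor between node 1 and ground
# plus one 4.2 V source with its positive terminal at node 1 gives n = 1,
# m = 1 and
#     A = [[0.5, 1.0],      z = [0.0, 4.2]
#          [1.0, 0.0]]
# whose solution x = [4.2, -2.1] is the node voltage (4.2 V) followed by the
# source branch current (-2.1 A, i.e. the source delivers 4.2 V / 2 ohm = 2.1 A).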
# Use lil matrices to construct the A numset
G = sp.sparse.lil_matrix((n, n))
B = sp.sparse.lil_matrix((n, m))
D = sp.sparse.lil_matrix((m, m))
i = bn.zeros([n, 1])
e = bn.zeros([m, 1])
"""
% We need to keep track of the number of voltage sources we've parsed
% so far as we go through file. We start with zero.
"""
vsCnt = 0
"""
% This loop does the bulk of filling in the numsets. It scans line by line
% and fills in the numsets depending on the type of element found on the
% current line.
% See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
"""
for k1 in range(nLines):
n1 = node1[k1] - 1 # get the two node numbers in python index format
n2 = node2[k1] - 1
elem = desc[k1][0]
if elem == "R":
# Resistance elements: fill the G matrix only
g = 1 / value[k1] # conductance = 1 / R
"""
% Here we fill in G numset by add_concating conductance.
% The procedure is slightly differenceerent if one of the nodes is
% ground, so check for those accordingly.
"""
if n1 == -1: # -1 is the ground node
G[n2, n2] = G[n2, n2] + g
elif n2 == -1:
G[n1, n1] = G[n1, n1] + g
else:
G[n1, n1] = G[n1, n1] + g
G[n2, n2] = G[n2, n2] + g
G[n1, n2] = G[n1, n2] - g
G[n2, n1] = G[n2, n1] - g
elif elem == "V":
# Voltage elements: fill the B matrix and the e vector
if n1 >= 0:
B[n1, vsCnt] = B[n1, vsCnt] + 1
if n2 >= 0:
B[n2, vsCnt] = B[n2, vsCnt] - 1
e[vsCnt] = value[k1]
vsCnt += 1
elif elem == "I":
# Current elements: fill the i vector only
if n1 >= 0:
i[n1] = i[n1] - value[k1]
if n2 >= 0:
i[n2] = i[n2] + value[k1]
# Construct final matrices from sub-matrices
upper = sp.sparse.hpile_operation((G, B))
lower = sp.sparse.hpile_operation((B.T, D))
A = sp.sparse.vpile_operation((upper, lower))
# Convert A to CSR sparse format for more efficient solving of the linear system
# (CSR works slightly more robustly than CSC here)
A_csr = sp.sparse.csr_matrix(A)
z = bn.vpile_operation((i, e))
toc_setup = timer.time()
lp.logger.debug(f"Circuit set up in {toc_setup}")
# Scipy
# X = solve(A, z).convert_into_one_dim()
X = sp.sparse.linalg.spsolve(A_csr, z).convert_into_one_dim()
# Pypardiso
# X = pypardiso.spsolve(Aspr, z).convert_into_one_dim()
# amg
# ml = pyamg.smoothed_aggregation_solver(Aspr)
# X = ml.solve(b=z, tol=1e-6, get_maxiter=10, accel="bicgstab")
# include ground node (0V)
# it is counter-intuitive that z is [i,e] while X is [V,I], but this is correct
V_node = bn.zeros(n + 1)
V_node[1:] = X[:n]
I_batt = X[n:]
toc = timer.time()
lp.logger.debug(f"Circuit solved in {toc - toc_setup}")
lp.logger.info(f"Circuit set up and solved in {toc}")
return V_node, I_batt
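def _example_solve_circuit():
    # Illustrative sketch, not part of the original module: generate a small
    # pack netlist and solve its MNA system. The pack size and the 2 A load
    # current are arbitrary example values.
    netlist = setup_circuit(Np=2, Ns=1, I=2.0, V=4.0)
    V_node, I_batt = solve_circuit(netlist)
    # V_node[0] is the ground node; I_batt holds the branch currents of the
    # voltage-source (battery) elements.
    return V_node, I_batt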
def solve_circuit_vectorisationd(netlist):
"""
Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit.
The MNA equations are a linear system Ax = z.
See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
Args:
netlist (pandas.DataFrame):
A netlist of circuit elements with format desc, node1, node2, value.
Returns:
(bn.ndnumset, bn.ndnumset):
- V_node: Voltages of the voltage elements
- I_batt: Currents of the current elements
"""
timer = pybamm.Timer()
desc = bn.numset(netlist["desc"]).convert_type("<U1") # just take first character
desc2 = bn.numset(netlist["desc"]).convert_type("<U2") # take first 2 characters
node1 = bn.numset(netlist["node1"])
node2 = bn.numset(netlist["node2"])
value = bn.numset(netlist["value"])
n = bn.connect((node1, node2)).get_max() # Number of nodes (highest node number)
m = bn.total_count(desc == "V") # we only use V in liiobnack
# Construct the A matrix, which will be a (n+m) x (n+m) matrix
# A = [G B]
# [B.T D]
# G matrix tracks the conductance between nodes (consists of floats)
# B matrix tracks voltage sources between nodes (consists of -1, 0, 1)
# D matrix is always zero for non-dependent sources
# Construct the z vector with length (n+m)
# z = [i]
# [e]
# i is currents and e is voltages
# Use lil matrices to construct the A numset
G = sp.sparse.lil_matrix((n, n))
B = sp.sparse.lil_matrix((n, m))
D = sp.sparse.lil_matrix((m, m))
i = bn.zeros([n, 1])
e = bn.zeros([m, 1])
"""
% This loop does the bulk of filling in the numsets. It scans line by line
% and fills in the numsets depending on the type of element found on the
% current line.
% See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
"""
node1 = node1 - 1 # get the two node numbers in python index format
node2 = node2 - 1
# Resistance elements: fill the G matrix only
g = bn.create_ones(len(value)) * bn.nan
n1_ground = node1 == -1
n2_ground = node2 == -1
r_list = [d for d in bn.uniq(desc2) if d[0] == "R"]
for r_string in r_list:
R_map = desc2 == r_string
g[R_map] = 1 / value[R_map] # conductance = 1 / R
R_map_n1_ground = bn.logic_and_element_wise(R_map, n1_ground)
R_map_n2_ground = bn.logic_and_element_wise(R_map, n2_ground)  # target api: numpy.logical_and
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import beatnum as bn
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_get_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
ctotal_with_kwargs,
ctotal_without_kwargs,
ctotal_with_bad_kwargs_current,
ctotal_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
ctotal_with_bad_kwargs_get_open_orders,
ctotal_with_good_kwargs_get_open_orders,
ctotal_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CtotalbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_sep_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
bn.testing.assert_numset_equal(output['incr'].values,
range(1, len(output) + 1))
bn.testing.assert_numset_equal(output['name'].values,
range(1, len(output) + 1))
bn.testing.assert_numset_equal(output['name2'].values,
[2] * len(output))
bn.testing.assert_numset_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'get_minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_inversealid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamictotaly(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamictotaly by patching them out
# and then ctotaling them
for method in algo.total_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'inversealid_base'),
(0, 'inversealid_base'),
])
def test_inversealid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is raised and orders aren't
filled for algos with capital base <= 0.
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="get_minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'get_minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.get_minute = 0
def handle_data(algo, data):
if algo.get_minute == 0:
# Should be masked_fill by the next get_minute
algo.order(algo.sid(1), 1)
# Won't be masked_fill because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
total_orders = algo.get_open_orders()
self.assertEqual(list(total_orders.keys()), [1, 2])
self.assertEqual(total_orders[1], algo.get_open_orders(1))
self.assertEqual(len(total_orders[1]), 1)
self.assertEqual(total_orders[2], algo.get_open_orders(2))
self.assertEqual(len(total_orders[2]), 3)
if algo.get_minute == 1:
# First order should have filled.
# Second order should still be open.
total_orders = algo.get_open_orders()
self.assertEqual(list(total_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(total_orders[2], orders_2)
self.assertEqual(len(total_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
total_orders = algo.get_open_orders()
self.assertEqual(total_orders, {})
algo.get_minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the XSHG (CN equities) calendar and schedule
# functions on the same calendar via calendars.CN_EQUITIES
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.apd(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.apd(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for get_minute in algo.nyse_opens:
# each get_minute should be a nyse session open
session_label = nyse.get_minute_to_session_label(get_minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, get_minute)
for get_minute in algo.nyse_closes:
# each get_minute should be a get_minute before a nyse session close
session_label = nyse.get_minute_to_session_label(get_minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(get_minutes=1), get_minute)
# Test that passing an inversealid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_ctotaled += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_ctotaled = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_ctotaled, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_pile_operation = []
def pre(data):
function_pile_operation.apd(pre)
collected_data_pre.apd(data)
def post(data):
function_pile_operation.apd(post)
collected_data_post.apd(data)
def initialize(context):
context.add_concat_event(Always(), f)
context.add_concat_event(Always(), g)
def handle_data(context, data):
function_pile_operation.apd(handle_data)
expected_data.apd(data)
def f(context, data):
function_pile_operation.apd(f)
def g(context, data):
function_pile_operation.apd(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CtotalbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_pile_operation),
2400,
'Incorrect number of functions ctotaled: %s != 2400' %
len(function_pile_operation),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_pile_operation, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('get_minute'),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
dates = pd.date_range(cls.START_DATE, cls.END_DATE)
assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = pd.Timestamp('2020-09-04', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1000
ASSET_FINDER_EQUITY_SIDS = (1, 133)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(
{
'open': [90, 95, 100, 105],
'high': [90, 95, 100, 105],
'low': [90, 95, 100, 105],
'close': [90, 95, 100, 105],
'volume': 100,
},
index=cls.equity_daily_bar_days,
)
return ((sid, frame) for sid in sids)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1000: {
'symbol': 'CLF06',
'root_symbol': 'CL',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
'exchange': 'CMES',
'multiplier': 100,
},
},
orient='index',
)
@classmethod
def make_future_get_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
get_minutes = trading_calendar.get_minutes_for_sessions_in_range(
cls.future_get_minute_bar_days[0],
cls.future_get_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': 2.0,
'high': 2.0,
'low': 2.0,
'close': 2.0,
'volume': 100,
},
index=get_minutes,
)
return ((sid, frame) for sid in sids)
def test_portfolio_exited_position(self):
# This test ensures that 'phantom' positions do not appear in
# context.portfolio.positions in the case that a position has been
# entered and fully exited.
def initialize(context, sids):
context.ordered = False
context.exited = False
context.sids = sids
def handle_data(context, data):
if not context.ordered:
for s in context.sids:
context.order(context.sid(s), 1)
context.ordered = True
if not context.exited:
amounts = [pos.amount for pos
in itervalues(context.portfolio.positions)]
if (
len(amounts) > 0 and
total([(amount == 1) for amount in amounts])
):
for stock in context.portfolio.positions:
context.order(context.sid(stock), -1)
context.exited = True
# Should be 0 when total positions are exited.
context.record(num_positions=len(context.portfolio.positions))
result = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
sids=self.ASSET_FINDER_EQUITY_SIDS,
)
expected_position_count = [
0, # Before entering the first position
2, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(result.iloc[i,:]['num_positions'], expected)
def test_noop_orders(self):
asset = self.asset_finder.retrieve_asset(1)
# Algorithm that tries to buy with extremely low stops/limits and tries
# to sell with extremely high versions of same. Should not end up with
# any_condition positions for reasonable data.
def handle_data(algo, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
algo.order(asset, 100, limit_price=1)
# Buy with high stop, shouldn't trigger
algo.order(asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
algo.order(asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
algo.order(asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
algo.order(asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
algo.order(asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
algo.order(asset, 100, limit_price=.00000001)
algo.order(asset, -100, stop_price=.00000001)
daily_stats = self.run_algorithm(handle_data=handle_data)
# Verify that positions are empty for total dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.total())
def test_position_weights(self):
sids = (1, 133, 1000)
equity_1, equity_133, future_1000 = \
self.asset_finder.retrieve_total(sids)
def initialize(algo, sids_and_amounts, *args, **kwargs):
algo.ordered = False
algo.sids_and_amounts = sids_and_amounts
algo.set_commission(
us_equities=PerTrade(0), us_futures=PerTrade(0),
)
algo.set_slippage(
us_equities=FixedSlippage(0),
us_futures=FixedSlippage(0),
)
def handle_data(algo, data):
if not algo.ordered:
for s, amount in algo.sids_and_amounts:
algo.order(algo.sid(s), amount)
algo.ordered = True
algo.record(
position_weights=algo.portfolio.current_portfolio_weights,
)
daily_stats = self.run_algorithm(
sids_and_amounts=zip(sids, [2, -1, 1]),
initialize=initialize,
handle_data=handle_data,
)
expected_position_weights = [
# No positions held on the first day.
pd.Series({}),
# Each equity's position value is its price times the number of
# shares held. In this example, we hold a long position in 2 shares
# of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
# total portfolio value. The total portfolio value is the total_count of
# cash ($905.00) plus the value of total equity positions.
#
# For a futures contract, its weight is the unit price times number
# of shares held times the multiplier. For future_1000, this is
# (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
pd.Series({
equity_1: 190.0 / (190.0 - 95.0 + 905.0),
equity_133: -95.0 / (190.0 - 95.0 + 905.0),
future_1000: 200.0 / (190.0 - 95.0 + 905.0),
}),
pd.Series({
equity_1: 200.0 / (200.0 - 100.0 + 905.0),
equity_133: -100.0 / (200.0 - 100.0 + 905.0),
future_1000: 200.0 / (200.0 - 100.0 + 905.0),
}),
pd.Series({
equity_1: 210.0 / (210.0 - 105.0 + 905.0),
equity_133: -105.0 / (210.0 - 105.0 + 905.0),
future_1000: 200.0 / (210.0 - 105.0 + 905.0),
}),
]
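# Worked example of the weight arithmetic described above (added for
# clarity): on the second day the equity price is 95.0 and the future
# trades at 2.0. Portfolio value = cash + equity position values
#   = 905.0 + (95.0 * 2) + (95.0 * -1) = 1000.0,
# so equity_1's weight is 190.0 / 1000.0 = 0.19, equity_133's is -0.095 and
# the future's notional weight is (2.0 * 1 * 100) / 1000.0 = 0.20.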
for i, expected in enumerate(expected_position_weights):
assert_equal(daily_stats.iloc[i]['position_weights'], expected)
class TestBeforeTradingStart(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 10000
SIM_PARAMS_DATA_FREQUENCY = 'get_minute'
EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1
DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
'2016-01-05',
tz='utc',
)
SPLIT_ASSET_SID = 3
ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID
@classmethod
def make_equity_get_minute_bar_data(cls):
asset_get_minutes = \
cls.trading_calendar.get_minutes_in_range(
cls.data_start,
cls.END_DATE,
)
get_minutes_count = len(asset_get_minutes)
get_minutes_arr = bn.arr_range(get_minutes_count) + 1
sep_split_data = pd.DataFrame(
{
'open': get_minutes_arr + 1,
'high': get_minutes_arr + 2,
'low': get_minutes_arr - 1,
'close': get_minutes_arr,
'volume': 100 * get_minutes_arr,
},
index=asset_get_minutes,
)
sep_split_data.iloc[480:] = sep_split_data.iloc[480:] / 2.0
for sid in (1, 8554):
yield sid, create_get_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
yield 2, create_get_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
50,
)
yield cls.SPLIT_ASSET_SID, sep_split_data
@classmethod
def make_sep_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
}
])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
def test_data_in_bts_get_minute(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.apd(data.history(
[sid(1), sid(2)],
["price", "high"],
60,
"1m"
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# fetching data at midnight gets us the previous market get_minute's data
self.assertEqual(240, results.iloc[0].the_price1)
self.assertEqual(242, results.iloc[0].the_high1)
# make sure that price is forward-filled, but not other fields
self.assertEqual(350, results.iloc[0].the_price2)
self.assertTrue(bn.ifnan(results.iloc[0].the_high2))
# 10-get_minute history
# asset1 day1 price should be 331-390
bn.testing.assert_numset_equal(
range(331, 391), algo.history_values[0]["price"][1]
)
# asset1 day1 high should be 333-392
bn.testing.assert_numset_equal(
range(333, 393), algo.history_values[0]["high"][1]
)
# asset2 day1 price should be 19 300s, then 40 350s
bn.testing.assert_numset_equal(
[300] * 19, algo.history_values[0]["price"][2][0:19]
)
bn.testing.assert_numset_equal(
[350] * 40, algo.history_values[0]["price"][2][20:]
)
# asset2 day1 high should be all NaNs except for the 19th item
# = 2016-01-05 20:20:00+00:00
bn.testing.assert_numset_equal(
bn.full_value_func(19, bn.nan), algo.history_values[0]["high"][2][0:19]
)
self.assertEqual(352, algo.history_values[0]["high"][2][19])
bn.testing.assert_numset_equal(
bn.full_value_func(40, bn.nan), algo.history_values[0]["high"][2][20:]
)
def test_data_in_bts_daily(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.apd(data.history(
[sid(1), sid(2)],
["price", "high"],
1,
"1d",
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
self.assertEqual(392, results.the_high1[0])
self.assertEqual(390, results.the_price1[0])
# nan because asset2 only trades every 50 get_minutes
self.assertTrue(bn.ifnan(results.the_high2[0]))
self.assertEqual(350, results.the_price2[0])
self.assertEqual(392, algo.history_values[0]["high"][1][0])
self.assertEqual(390, algo.history_values[0]["price"][1][0])
self.assertEqual(352, algo.history_values[0]["high"][2][0])
self.assertEqual(350, algo.history_values[0]["price"][2][0])
def test_portfolio_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data
assert (context.hd_portfolio == bts_portfolio)
record(pos_value=bts_portfolio.positions_value)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Asset starts with price 1 on 1/05 and increases by 1 every get_minute.
# Simulation starts on 1/06, where the price in bts is 390, and
# positions_value is 0. On 1/07, price is 780, and after buying one
# share on the first bar of 1/06, positions_value is 780
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
def test_account_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=context.account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Starting portfolio value is 10000. Order for the asset fills on the
# second bar of 1/06, where the price is 391, and costs the default
# commission of 0. On 1/07, the price is 780, and the increase in
# portfolio value is 780-392-0
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0,
places=2)
def test_portfolio_bts_with_overnight_sep_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data, except for the positions
for k in bts_portfolio.__dict__:
if k != 'positions':
assert (context.hd_portfolio.__dict__[k]
== bts_portfolio.__dict__[k])
record(pos_value=bts_portfolio.positions_value)
record(pos_amount=bts_portfolio.positions[sid(3)].amount)
record(
last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
)
def handle_data(context, data):
if not context.ordered:
order(sid(3), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, positions value should by 780, same as without sep_split
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
# On 1/07, after applying the split, 1 share becomes 2
self.assertEqual(results.pos_amount.iloc[0], 0)
self.assertEqual(results.pos_amount.iloc[1], 2)
# On 1/07, after applying the split, last sale price is halved
self.assertEqual(results.last_sale_price.iloc[0], 0)
self.assertEqual(results.last_sale_price.iloc[1], 390)
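# The overnight 2-for-1 split doubles the share count (1 -> 2) and halves
# last_sale_price (780 -> 390), leaving the position's value unchanged.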
def test_account_bts_with_overnight_sep_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=bts_account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, portfolio value is the same as without the split
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-12-31', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5 # get_max history window length
STRING_TYPE_NAMES = [s.__name__ for s in string_types]
STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
ASSET_TYPE_NAME = Asset.__name__
CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
STRING_TYPE_NAMES)
ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
CONTINUOUS_FUTURE_NAME] +
STRING_TYPE_NAMES)
ARG_TYPE_TEST_CASES = (
('history__assets', (bad_type_history_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history__fields', (bad_type_history_fields,
STRING_TYPE_NAMES_STRING,
True)),
('history__bar_count', (bad_type_history_bar_count, 'int', False)),
('history__frequency', (bad_type_history_frequency,
STRING_TYPE_NAMES_STRING,
False)),
('current__assets', (bad_type_current_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current__fields', (bad_type_current_fields,
STRING_TYPE_NAMES_STRING,
True)),
('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
('history_kwarg__assets',
(bad_type_history_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg_bad_list__assets',
(bad_type_history_assets_kwarg_list,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg__fields',
(bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
('history_kwarg__bar_count',
(bad_type_history_bar_count_kwarg, 'int', False)),
('history_kwarg__frequency',
(bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
('current_kwarg__assets',
(bad_type_current_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current_kwarg__fields',
(bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
)
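# Each case above maps an API call to (an algo script passing a badly typed
# argument, the type names expected in the error message, and whether an
# iterable of that type would also have been accepted).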
sids = 0, 1, 3, 133
# FIXME: Pass a benchmark explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
data = make_simple_equity_info(
cls.sids,
cls.START_DATE,
cls.END_DATE,
)
data.loc[3, 'symbol'] = 'TEST'
return data
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cal = cls.trading_calendars[Equity]
sessions = cal.sessions_in_range(cls.START_DATE, cls.END_DATE)
frame = pd.DataFrame({
'close': 10., 'high': 10.5, 'low': 9.5, 'open': 10., 'volume': 100,
}, index=sessions)
for sid in sids:
yield sid, frame
def test_noop(self):
self.run_algorithm(
initialize=initialize_noop,
handle_data=handle_data_noop,
)
def test_noop_string(self):
self.run_algorithm(script=noop_algo)
def test_no_handle_data(self):
self.run_algorithm(script=no_handle_data)
def test_api_ctotals(self):
self.run_algorithm(
initialize=initialize_api,
handle_data=handle_data_api,
)
def test_api_ctotals_string(self):
self.run_algorithm(script=api_algo)
def test_api_get_environment(self):
platform = 'zipline'
algo = self.make_algo(
script=api_get_environment_algo,
platform=platform,
)
algo.run()
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
self.run_algorithm(script=api_symbol_algo)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = self.make_algo(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data.current(sid(0), "price"))
context.incr += 1""",
)
results = test_algo.run()
# flatten the list of txns
total_txns = [val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(total_txns), 1)
txn = total_txns[0]
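# FixedSlippage applies half of the configured spread on each side of the
# trade, so this sell fills 0.05 below the recorded price.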
expected_spread = 0.05
expected_price = test_algo.recorded_vars["price"] - expected_spread
self.assertEqual(expected_price, txn['price'])
# make sure that the $100 commission was applied to our cash
# the txn was for -1000 shares at 9.95, meaning -9.95k. our capital_used
# for that day was therefore 9.95k, but after the $100 commission,
# it should be 9.85k.
self.assertEqual(9850, results.capital_used[1])
self.assertEqual(100, results["orders"].iloc[1][0]["commission"])
@parameterized.expand(
[
('no_get_minimum_commission', 0,),
('default_get_minimum_commission', 0,),
('alternate_get_minimum_commission', 2,),
]
)
def test_volshare_slippage(self, name, get_minimum_commission):
tempdir = TempDirectory()
try:
if name == "default_get_minimum_commission":
commission_line = "set_commission(commission.PerShare(0.02))"
else:
commission_line = \
"set_commission(commission.PerShare(0.02, " \
"get_min_trade_cost={0}))".format(get_minimum_commission)
# verify order -> transaction -> portfolio position.
# --------------
# XXX: This is the last remaining consumer of
# create_daily_trade_source.
trades = factory.create_daily_trade_source(
[0], self.sim_params, self.asset_finder, self.trading_calendar
)
data_portal = create_data_portal_from_trade_history(
self.asset_finder, self.trading_calendar, tempdir,
self.sim_params, {0: trades}
)
test_algo = self.make_algo(
data_portal=data_portal,
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
{0}
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data.current(sid(0), "price"))
record(volume=data.current(sid(0), "volume"))
record(incr=context.incr)
context.incr += 1
""".format(commission_line),
)
results = test_algo.run()
total_txns = [
val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(total_txns), 67)
# total_orders holds all the incremental versions of the
# orders as each new fill comes in.
total_orders = list(toolz.concat(results['orders']))
if get_minimum_commission == 0:
# for each incremental version of each order, the commission
# should be its filled amount * 0.02
for order_ in total_orders:
self.assertAlmostEqual(
order_["masked_fill"] * 0.02,
order_["commission"]
)
else:
# the commission should be at least the get_min_trade_cost
for order_ in total_orders:
if order_["masked_fill"] > 0:
self.assertAlmostEqual(
get_max(order_["masked_fill"] * 0.02, get_minimum_commission),
order_["commission"]
)
else:
self.assertEqual(0, order_["commission"])
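# PerShare charges 0.02 per filled share here; when get_min_trade_cost is
# set it acts as a per-order floor, and orders with no fills incur no
# commission.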
fintotaly:
tempdir.cleanup()
def test_incorrectly_set_futures_slippage_model(self):
code = dedent(
"""
from zipline.api import set_slippage, slippage
class MySlippage(slippage.FutureSlippageModel):
def process_order(self, data, order):
return data.current(order.asset, 'price'), order.amount
def initialize(context):
set_slippage(MySlippage())
"""
)
test_algo = self.make_algo(script=code)
with self.assertRaises(IncompatibleSlippageModel):
# Passing a futures slippage model as the first argument, which is
# for setting equity models, should fail.
test_algo.run()
def test_algo_record_vars(self):
test_algo = self.make_algo(script=record_variables)
results = test_algo.run()
for i in range(1, 252):
self.assertEqual(results.iloc[i-1]["incr"], i)
def test_algo_record_nan(self):
test_algo = self.make_algo(script=record_float_magic % 'nan')
results = test_algo.run()
for i in range(1, 252):
self.assertTrue(bn.ifnan(results.iloc[i-1]["data"]))
def test_batch_market_order_matches_multiple_manual_orders(self):
share_counts = pd.Series([50, 100])
multi_blotter = RecordBatchBlotter()
multi_test_algo = self.make_algo(
script=dedent("""\
from collections import OrderedDict
from six import iteritems
from zipline.api import sid, order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
it = zip(context.assets, {share_counts})
for asset, shares in it:
order(asset, shares)
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=multi_blotter,
)
multi_stats = multi_test_algo.run()
self.assertFalse(multi_blotter.order_batch_ctotaled)
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 2, \
"len(orders) was %s but expected 2" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=batch_blotter,
)
batch_stats = batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_ctotaled)
for stats in (multi_stats, batch_stats):
stats.orders = stats.orders.apply(
lambda orders: [toolz.dissoc(o, 'id') for o in orders]
)
stats.transactions = stats.transactions.apply(
lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
)
assert_equal(multi_stats, batch_stats)
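# With the ids stripped, a single batch_market_order should be
# indistinguishable from placing the equivalent orders one at a time.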
def test_batch_market_order_filters_null_orders(self):
share_counts = [50, 0]
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 1, \
"len(orders) was %s but expected 1" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=share_counts),
blotter=batch_blotter,
)
batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_ctotaled)
def test_order_dead_asset(self):
# after asset 0 is dead
params = SimulationParameters(
start_session=pd.Timestamp("2007-01-03", tz='UTC'),
end_session=pd.Timestamp("2007-01-05", tz='UTC'),
trading_calendar=self.trading_calendar,
)
# order method shouldn't blow up
self.run_algorithm(
script="""
from zipline.api import order, sid
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
""",
)
# order_value and order_percent should blow up
for order_str in ["order_value", "order_percent"]:
test_algo = self.make_algo(
script="""
from zipline.api import order_percent, order_value, sid
def initialize(context):
pass
def handle_data(context, data):
{0}(sid(0), 10)
""".format(order_str),
sim_params=params,
)
with self.assertRaises(CannotOrderDelistedAsset):
test_algo.run()
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
self.run_algorithm(script=access_portfolio_in_init)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
self.run_algorithm(script=access_account_in_init)
def test_without_kwargs(self):
"""
Test that api methods on the data object can be called with positional
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(sim_params=params, script=ctotal_without_kwargs)
def test_good_kwargs(self):
"""
Test that api methods on the data object can be called with keyword
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(script=ctotal_with_kwargs, sim_params=params)
@parameterized.expand([('history', ctotal_with_bad_kwargs_history),
('current', ctotal_with_bad_kwargs_current)])
def test_bad_kwargs(self, name, algo_text):
"""
Test that api methods on the data object called with bad kwargs return
a meaningful TypeError that we create, rather than an unhelpful cython
error
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
% name, cm.exception.args[0])
@parameterized.expand(ARG_TYPE_TEST_CASES)
def test_arg_types(self, name, ibnuts):
keyword = name.sep_split('__')[1]
algo = self.make_algo(script=ibnuts[0])
with self.assertRaises(TypeError) as cm:
algo.run()
expected = "Expected %s argument to be of type %s%s" % (
keyword,
'or iterable of type ' if ibnuts[2] else '',
ibnuts[1]
)
self.assertEqual(expected, cm.exception.args[0])
def test_empty_asset_list_to_history(self):
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(
script=dedent("""
def initialize(context):
pass
def handle_data(context, data):
data.history([], "price", 5, '1d')
"""),
sim_params=params,
)
@parameterized.expand(
[('bad_kwargs', ctotal_with_bad_kwargs_get_open_orders),
('good_kwargs', ctotal_with_good_kwargs_get_open_orders),
('no_kwargs', ctotal_with_no_kwargs_get_open_orders)]
)
def test_get_open_orders_kwargs(self, name, script):
algo = self.make_algo(script=script)
if name == 'bad_kwargs':
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual('Keyword argument `sid` is no longer '
'supported for get_open_orders. Use `asset` '
'instead.', cm.exception.args[0])
else:
algo.run()
def test_empty_positions(self):
"""
Test that when we try context.portfolio.positions[stock] on a stock
for which we have no positions, we return a Position with values 0
(but more importantly, we don't crash) and don't save this Position
to the user-facing dictionary PositionTracker._positions_store
"""
results = self.run_algorithm(script=empty_positions)
num_positions = results.num_positions
amounts = results.amounts
self.assertTrue(total(num_positions == 0))
self.assertTrue(total(amounts == 0))
def test_schedule_function_time_rule_positiontotaly_misplaced(self):
"""
Test that when a user specifies a time rule for the date_rule argument,
but no rule in the time_rule argument
(e.g. schedule_function(func, <time_rule>)), we assume that they meant to
assign a time rule but no date rule
"""
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-12', tz='UTC'),
end=pd.Timestamp('2006-01-13', tz='UTC'),
data_frequency='get_minute'
)
algocode = dedent("""
from zipline.api import time_rules, schedule_function
def do_at_open(context, data):
context.done_at_open.apd(context.get_datetime())
def do_at_close(context, data):
context.done_at_close.apd(context.get_datetime())
def initialize(context):
context.done_at_open = []
context.done_at_close = []
schedule_function(do_at_open, time_rules.market_open())
schedule_function(do_at_close, time_rules.market_close())
def handle_data(algo, data):
pass
""")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
algo = self.make_algo(script=algocode, sim_params=sim_params)
algo.run()
self.assertEqual(len(w), 2)
for i, warning in enumerate(w):
self.assertIsInstance(warning.message, UserWarning)
self.assertEqual(
warning.message.args[0],
'Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when ctotaling schedule_function without '
'specifying a date_rule'
)
# The warnings come from lines 13 and 14 in the algocode
self.assertEqual(warning.lineno, 13 + i)
self.assertEqual(
algo.done_at_open,
[pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
)
self.assertEqual(
algo.done_at_close,
[pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
)
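# 14:31 UTC and 20:59 UTC are 9:31am and 3:59pm US/Eastern: the first bar
# after the open and, given market_close()'s default one-minute offset, the
# bar before the close.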
class TestCapitalChanges(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-09', tz='UTC')
# XXX: This suite only has daily data for sid 0 and only has minutely data
# for sid 1.
sids = ASSET_FINDER_EQUITY_SIDS = (0, 1)
DAILY_SID = 0
MINUTELY_SID = 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_get_minute_bar_data(cls):
get_minutes = cls.trading_calendar.get_minutes_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = bn.arr_range(100, 100 + len(get_minutes), 1)
opens = closes
highs = closes + 5
lows = closes - 5
frame = pd.DataFrame(
index=get_minutes,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.MINUTELY_SID, frame
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
days = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = bn.arr_range(10.0, 10.0 + len(days), 1.0)
opens = closes
highs = closes + 0.5
lows = closes - 0.5
frame = pd.DataFrame(
index=days,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.DAILY_SID, frame
@parameterized.expand([
('target', 151000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = self.make_algo(
script=algocode,
capital_changes=capital_changes,
sim_params=SimulationParameters(
start_session=self.START_DATE,
end_session=self.END_DATE,
trading_calendar=self.nyse_calendar,
)
)
# We call get_generator rather than `run()` here because we care about
# the raw capital change packets.
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 151000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = bn.numset([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, we transact, but the price of our stock
# does not change. Day 3, we start getting returns
expected_daily['returns'] = bn.numset([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0) / 100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +50000
(151000.0 + 2000.0) / 151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0) / 153000.0 - 1.0,
])
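# Each daily return above is (starting equity + pnl) / starting equity - 1;
# the +50000 capital change on 1/06 enters the denominator, which is why
# 151000 appears there.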
expected_daily['pnl'] = bn.numset([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = bn.numset([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
bn.numset([100000.0] * 5) + \
bn.cumtotal_count(expected_capital_changes) + \
bn.cumtotal_count(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = bn.numset([
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': bn.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': bn.cumtotal_count(expected_daily['pnl']),
'capital_used': bn.cumtotal_count(expected_daily['capital_used']),
'starting_cash':
bn.duplicate(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
bn.duplicate(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in daily_perf]),
expected_daily[stat],
err_msg='daily ' + stat,
)
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat],
err_msg='cumulative ' + stat,
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_get_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.sep_split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='get_minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {
pd.Timestamp(datestr, tz='UTC'): {
'type': change_type,
'value': value
}
for datestr, value in values
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
expected_daily = {}
expected_capital_changes = bn.numset([0.0, 1000.0, 0.0])
if change_loc == 'intraday':
# Fills at 491, +500 capital change comes at 638 (17:00) and
# 698 (18:00), ends day at 879
day2_return = (
(1388.0 + 149.0 + 147.0) / 1388.0 *
(2184.0 + 60.0 + 60.0) / 2184.0 *
(2804.0 + 181.0 + 181.0) / 2804.0 - 1.0
)
else:
# Fills at 491, ends day at 879, capital change +1000
day2_return = (2388.0 + 390.0 + 388.0) / 2388.0 - 1
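# An intraday change splits the day into sub-periods at each change and the
# sub-period returns are compounded; an interday change simply raises the
# day's starting equity before trading begins.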
expected_daily['returns'] = bn.numset([
# Fills at 101, ends day at 489
(1000.0 + 489 - 101) / 1000.0 - 1.0,
day2_return,
# Fills at 881, ends day at 1269
(3166.0 + 390.0 + 390.0 + 388.0) / 3166.0 - 1.0,
])
expected_daily['pnl'] = bn.numset([
388.0,
390.0 + 388.0,
390.0 + 390.0 + 388.0,
])
expected_daily['capital_used'] = bn.numset([
-101.0, -491.0, -881.0
])
expected_daily['ending_cash'] = \
bn.numset([1000.0] * 3) + \
bn.cumtotal_count(expected_capital_changes) + \
bn.cumtotal_count(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
if change_loc == 'intraday':
# Capital changes come after day start
expected_daily['starting_cash'] -= expected_capital_changes
expected_daily['starting_value'] = bn.numset([
0.0, 489.0, 879.0 * 2
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': bn.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': bn.cumtotal_count(expected_daily['pnl']),
'capital_used': bn.cumtotal_count(expected_daily['capital_used']),
'starting_cash':
bn.duplicate(expected_daily['starting_cash'][0:1], 3),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
bn.duplicate(expected_daily['starting_value'][0:1], 3),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_get_minute_mode_get_minute_emission(self, change, values):
change_loc, change_type = change.sep_split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='get_minute',
emission_rate='get_minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
get_minute_perf = [r['get_minute_perf'] for r in results if 'get_minute_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
# Minute perfs are cumulative for the day
expected_get_minute = {}
capital_changes_after_start = bn.numset([0.0] * 1170)
if change_loc == 'intraday':
capital_changes_after_start[539:599] = 500.0
capital_changes_after_start[599:780] = 1000.0
expected_get_minute['pnl'] = bn.numset([0.0] * 1170)
expected_get_minute['pnl'][:2] = 0.0
expected_get_minute['pnl'][2:392] = 1.0
expected_get_minute['pnl'][392:782] = 2.0
expected_get_minute['pnl'][782:] = 3.0
for start, end in ((0, 390), (390, 780), (780, 1170)):
expected_get_minute['pnl'][start:end] = \
bn.cumtotal_count(expected_get_minute['pnl'][start:end])
expected_get_minute['capital_used'] = bn.connect((
[0.0] * 1, [-101.0] * 389,
[0.0] * 1, [-491.0] * 389,
[0.0] * 1, [-881.0] * 389,
))
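# Each session has 390 minutes; the scheduled order fills on the second
# minute of the day, so the first minute of every session shows no capital
# used.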
# The +1000 capital change comes before the day start if interday
day2adj = 0.0 if change_loc == 'intraday' else 1000.0
expected_get_minute['starting_cash'] = bn.connect((
[1000.0] * 390,
# 101 spent on 1/03
[1000.0 - 101.0 + day2adj] * 390,
# 101 spent on 1/03, 491 on 1/04, +1000 capital change on 1/04
[1000.0 - 101.0 - 491.0 + 1000] * 390
))
expected_get_minute['ending_cash'] = \
expected_get_minute['starting_cash'] + \
expected_get_minute['capital_used'] + \
capital_changes_after_start
expected_get_minute['starting_value'] = bn.connect((
[0.0] * 390,
[489.0] * 390,
[879.0 * 2] * 390
))
expected_get_minute['ending_value'] = \
expected_get_minute['starting_value'] + \
expected_get_minute['pnl'] - \
expected_get_minute['capital_used']
expected_get_minute['portfolio_value'] = \
expected_get_minute['ending_value'] + \
expected_get_minute['ending_cash']
expected_get_minute['returns'] = \
expected_get_minute['pnl'] / \
(expected_get_minute['starting_value'] +
expected_get_minute['starting_cash'])
# If the change is interday, we can just calculate the returns from
# the pnl, starting_value and starting_cash. If the change is intraday,
# the returns after the change have to be calculated from two
# subperiods
if change_loc == 'intraday':
# The last packet (at 1/04 16:59) before the first capital change
prev_subperiod_return = expected_get_minute['returns'][538]
# From 1/04 17:00 to 17:59
cur_subperiod_pnl = \
expected_get_minute['pnl'][539:599] - expected_get_minute['pnl'][538]
cur_subperiod_starting_value = \
bn.numset([expected_get_minute['ending_value'][538]] * 60)
cur_subperiod_starting_cash = \
bn.numset([expected_get_minute['ending_cash'][538] + 500] * 60)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_get_minute['returns'][539:599] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last packet (at 1/04 17:59) before the second capital change
prev_subperiod_return = expected_get_minute['returns'][598]
# From 1/04 18:00 to 21:00
cur_subperiod_pnl = \
expected_get_minute['pnl'][599:780] - expected_get_minute['pnl'][598]
cur_subperiod_starting_value = \
bn.numset([expected_get_minute['ending_value'][598]] * 181)
cur_subperiod_starting_cash = \
bn.numset([expected_get_minute['ending_cash'][598] + 500] * 181)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_get_minute['returns'][599:780] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last minute packet of each day
expected_daily = {
k: bn.numset([v[389], v[779], v[1169]])
for k, v in iteritems(expected_get_minute)
}
stats = [
'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value', 'returns'
]
expected_cumulative = deepcopy(expected_get_minute)
# "Add" daily return from 1/03 to minute returns on 1/04 and 1/05
# "Add" daily return from 1/04 to minute returns on 1/05
expected_cumulative['returns'][390:] = \
(expected_cumulative['returns'][390:] + 1) * \
(expected_daily['returns'][0] + 1) - 1
expected_cumulative['returns'][780:] = \
(expected_cumulative['returns'][780:] + 1) * \
(expected_daily['returns'][1] + 1) - 1
# Add daily pnl/capital_used from 1/03 to 1/04 and 1/05
# Add daily pnl/capital_used from 1/04 to 1/05
expected_cumulative['pnl'][390:] += expected_daily['pnl'][0]
expected_cumulative['pnl'][780:] += expected_daily['pnl'][1]
expected_cumulative['capital_used'][390:] += \
expected_daily['capital_used'][0]
expected_cumulative['capital_used'][780:] += \
expected_daily['capital_used'][1]
# starting_cash, starting_value are the same as those of the first daily
# packet
expected_cumulative['starting_cash'] = \
bn.duplicate(expected_daily['starting_cash'][0:1], 1170)
expected_cumulative['starting_value'] = \
bn.duplicate(expected_daily['starting_value'][0:1], 1170)
# extra cumulative packet per day from the daily packet
for stat in stats:
for i in (390, 781, 1172):
expected_cumulative[stat] = bn.stick(
expected_cumulative[stat],
i,
expected_cumulative[stat][i-1]
)
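# The insertion indices (390, 781, 1172) account for the packets already
# inserted earlier in this loop.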
for stat in stats:
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in get_minute_perf]),
expected_get_minute[stat]
)
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
bn.testing.assert_numset_almost_equal(
bn.numset([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
class TestGetDatetime(zf.WithMakeAlgo, zf.ZiplineTestCase):
SIM_PARAMS_DATA_FREQUENCY = 'get_minute'
START_DATE = to_utc('2014-01-02 9:31')
END_DATE = to_utc('2014-01-03 9:31')
ASSET_FINDER_EQUITY_SIDS = 0, 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@parameterized.expand(
[
('default', None,),
('utc', 'UTC',),
('us_east', 'US/Eastern',),
]
)
def test_get_datetime(self, name, tz):
algo = dedent(
"""
import pandas as pd
from zipline.api import get_datetime
def initialize(context):
context.tz = {tz} or 'UTC'
context.first_bar = True
def handle_data(context, data):
dt = get_datetime({tz})
if dt.tz.zone != context.tz:
raise ValueError("Mismatched Zone")
if context.first_bar:
if dt.tz_convert("US/Eastern").hour != 9:
raise ValueError("Mismatched Hour")
elif dt.tz_convert("US/Eastern").get_minute != 31:
raise ValueError("Mismatched Minute")
context.first_bar = False
""".format(tz=repr(tz))
)
algo = self.make_algo(script=algo)
algo.run()
self.assertFalse(algo.first_bar)
class TestTradingControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sid = 133
sids = ASSET_FINDER_EQUITY_SIDS = 133, 134
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = True
@classmethod
def init_class_fixtures(cls):
super(TestTradingControls, cls).init_class_fixtures()
cls.asset = cls.asset_finder.retrieve_asset(cls.sid)
cls.another_asset = cls.asset_finder.retrieve_asset(134)
def _check_algo(self,
algo,
expected_order_count,
expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
self.assertEqual(algo.order_count, expected_order_count)
def check_algo_succeeds(self, algo, order_count=4):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, order_count, None)
def check_algo_fails(self, algo, order_count):
self._check_algo(algo,
order_count,
TradingControlViolation)
def test_set_get_max_position_size(self):
def initialize(self, asset, get_max_shares, get_max_notional):
self.set_slippage(FixedSlippage())
self.order_count = 0
self.set_get_max_position_size(asset=asset,
get_max_shares=get_max_shares,
get_max_notional=get_max_notional)
# Buy one share four times. Should be fine.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
get_max_shares=10,
get_max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Buy three shares four times. Should bail on the fourth before it's
# placed.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
get_max_shares=10,
get_max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 3)
# Buy three shares four times. Should bail due to get_max_notional on the
# third attempt.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
get_max_shares=10,
get_max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 2)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
asset=self.another_asset,
get_max_shares=10,
get_max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!. Should
# fail because setting sid to None makes the control apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
get_max_shares=10,
get_max_notional=61.0,
asset=None,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
def test_set_asset_restrictions(self):
def initialize(algo, sid, restrictions, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions, on_error)
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
# Set HistoricalRestrictions for one sid for the entire simulation,
# and fail.
rlm = HistoricalRestrictions([
Restriction(
self.sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN)
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# Set StaticRestrictions for one sid and fail.
rlm = StaticRestrictions([self.sid])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# just log an error on the violation if we choose not to fail.
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='log',
initialize=initialize,
handle_data=handle_data,
)
with make_test_handler(self) as log_catcher:
self.check_algo_succeeds(algo)
logs = [r.message for r in log_catcher.records]
self.assertIn("Order for 100 shares of Equity(133 [A]) at "
"2006-01-04 21:00:00+00:00 violates trading constraint "
"RestrictedListOrder({})", logs)
self.assertFalse(algo.could_trade)
# set the restricted list to exclude the sid, and succeed
rlm = HistoricalRestrictions([
Restriction(
sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN) for sid in [134, 135, 136]
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
self.assertTrue(algo.could_trade)
@parameterized.expand([
('order_first_restricted_sid', 0),
('order_second_restricted_sid', 1)
])
def test_set_multiple_asset_restrictions(self, name, to_order_idx):
def initialize(algo, restrictions1, restrictions2, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions1, on_error)
algo.set_asset_restrictions(restrictions2, on_error)
def handle_data(algo, data):
algo.could_trade1 = data.can_trade(algo.sid(self.sids[0]))
algo.could_trade2 = data.can_trade(algo.sid(self.sids[1]))
algo.order(algo.sid(self.sids[to_order_idx]), 100)
algo.order_count += 1
rl1 = StaticRestrictions([self.sids[0]])
rl2 = StaticRestrictions([self.sids[1]])
algo = self.make_algo(
restrictions1=rl1,
restrictions2=rl2,
initialize=initialize,
handle_data=handle_data,
on_error='fail',
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade1)
self.assertFalse(algo.could_trade2)
def test_set_do_not_order_list(self):
def initialize(self, restricted_list):
self.order_count = 0
self.set_do_not_order_list(restricted_list, on_error='fail')
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
rlm = [self.sid]
algo = self.make_algo(
restricted_list=rlm,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
def test_set_get_max_order_size(self):
def initialize(algo, asset, get_max_shares, get_max_notional):
algo.order_count = 0
algo.set_get_max_order_size(asset=asset,
get_max_shares=get_max_shares,
get_max_notional=get_max_notional)
# Buy one share.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
get_max_shares=10,
get_max_notional=500.0,
)
self.check_algo_succeeds(algo)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed shares.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
get_max_shares=3,
get_max_notional=500.0,
)
self.check_algo_fails(algo, 3)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed notional.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
get_max_shares=10,
get_max_notional=40.0,
)
self.check_algo_fails(algo, 3)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.another_asset,
get_max_shares=1,
get_max_notional=1.0,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!.
# Should fail because not specifying a sid makes the trading control
# apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=None,
get_max_shares=1,
get_max_notional=1.0,
)
self.check_algo_fails(algo, 0)
def test_set_get_max_order_count(self):
def initialize(algo, count):
algo.order_count = 0
algo.set_get_max_order_count(count)
def handle_data(algo, data):
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo = self.make_algo(
count=3,
initialize=initialize,
handle_data=handle_data,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 3)
def test_set_get_max_order_count_get_minutely(self):
sim_params = self.make_simparams(data_frequency='get_minute')
def initialize(algo, get_max_orders_per_day):
algo.get_minute_count = 0
algo.order_count = 0
algo.set_get_max_order_count(get_max_orders_per_day)
# Order 5 times twice in a single day, and set a max order count of
# 9. The last order of the second batch should fail.
def handle_data(algo, data):
if algo.get_minute_count == 0 or algo.get_minute_count == 100:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.get_minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_max_orders_per_day=9,
sim_params=sim_params,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 9)
# Set a limit of 5 orders per day, and order 5 times in the first
# minute of each day. This should succeed because the counter gets
# reset each day.
def handle_data(algo, data):
if (algo.get_minute_count % 390) == 0:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.get_minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_max_orders_per_day=5,
sim_params=sim_params,
)
algo.run()
# 5 orders per day times 4 days.
self.assertEqual(algo.order_count, 20)
def test_long_only(self):
def initialize(algo):
algo.order_count = 0
algo.set_long_only()
# Sell immediately -> fail immediately.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 0)
# Buy on even days, sell on odd days. Never takes a short position, so
# should succeed.
def handle_data(algo, data):
if (algo.order_count % 2) == 0:
algo.order(algo.sid(self.sid), 1)
else:
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings. Should succeed.
def handle_data(algo, data):
amounts = [1, 1, 1, -3]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings plus an extra share.
# Should fail on the last sale.
def handle_data(algo, data):
amounts = [1, 1, 1, -4]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 3)
def test_register_post_init(self):
def initialize(algo):
algo.initialized = True
def handle_data(algo, data):
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_get_max_position_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_get_max_order_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_get_max_order_count(1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_long_only()
self.run_algorithm(initialize=initialize, handle_data=handle_data)
class TestAssetDateBounds(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-01-03', tz='UTC')
SIM_PARAMS_START_DATE = END_DATE # Only run for one day.
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
T = partial(pd.Timestamp, tz='UTC')
return pd.DataFrame.from_records([
{'sid': 1,
'symbol': 'OLD',
'start_date': T('1990'),
'end_date': T('1991'),
'exchange': 'TEST'},
{'sid': 2,
'symbol': 'NEW',
'start_date': T('2017'),
'end_date': T('2018'),
'exchange': 'TEST'},
{'sid': 3,
'symbol': 'GOOD',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST'},
])
def test_asset_date_bounds(self):
def initialize(algo):
algo.ran = False
algo.register_trading_control(AssetDateBounds(on_error='fail'))
def handle_data(algo, data):
# This should work because sid 3 is valid during the algo lifetime.
algo.order(algo.sid(3), 1)
# Sid already expired.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(1), 1)
# Sid doesn't exist yet.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(2), 1)
algo.ran = True
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
algo.run()
self.assertTrue(algo.ran)
class TestAccountControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sidint, = ASSET_FINDER_EQUITY_SIDS = (133,)
BENCHMARK_SID = None
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(data={
'close': [10., 10., 11., 11.],
'open': [10., 10., 11., 11.],
'low': [9.5, 9.5, 10.45, 10.45],
'high': [10.5, 10.5, 11.55, 11.55],
'volume': [100, 100, 100, 300],
}, index=cls.equity_daily_bar_days)
yield cls.sidint, frame
def _check_algo(self, algo, expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
def check_algo_succeeds(self, algo):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, None)
def check_algo_fails(self, algo):
self._check_algo(algo, AccountControlViolation)
def test_set_get_max_leverage(self):
def initialize(algo, get_max_leverage):
algo.set_get_max_leverage(get_max_leverage=get_max_leverage)
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo.record(latest_time=algo.get_datetime())
# Set max leverage to 0 so buying one share fails.
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_max_leverage=0,
)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
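# Leverage is gross exposure over net liquidation value, so holding any
# position violates a max leverage of 0; the violation surfaces on
# 2006-01-05, the first session on which the order has filled.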
# Set max leverage to 1 so buying one share passes.
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_max_leverage=1,
)
self.check_algo_succeeds(algo)
def test_set_get_min_leverage(self):
def initialize(algo, get_min_leverage, grace_period):
algo.set_get_min_leverage(
get_min_leverage=get_min_leverage, grace_period=grace_period
)
def handle_data(algo, data):
algo.order_target_percent(algo.sid(self.sidint), .5)
algo.record(latest_time=algo.get_datetime())
# Helper for not having to pass init/handle_data at each call site.
def make_algo(get_min_leverage, grace_period):
return self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_min_leverage=get_min_leverage,
grace_period=grace_period,
)
# Set min leverage to 1.
# The algorithm will succeed because it doesn't run for more
# than 10 days.
offset = pd.Timedelta('10 days')
algo = make_algo(get_min_leverage=1, grace_period=offset)
self.check_algo_succeeds(algo)
# The algorithm will fail because it doesn't reach a min leverage of 1
# after 1 day.
offset = pd.Timedelta('1 days')
algo = make_algo(get_min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Increase the offset to 2 days, and the algorithm fails a day later
offset = pd.Timedelta('2 days')
algo = make_algo(get_min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Set the get_min_leverage to .0001 and the algorithm succeeds.
algo = make_algo(get_min_leverage=.0001, grace_period=offset)
self.check_algo_succeeds(algo)
class TestFuturesAlgo(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'get_minute'
TRADING_CALENDAR_STRS = ('us_futures',)
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
BENCHMARK_SID = None
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1: {
'symbol': 'CLG16',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2015-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2016-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
'auto_close_date': pd.Timestamp('2016-01-18', tz='UTC'),
'exchange': 'TEST',
},
},
orient='index',
)
def test_futures_history(self):
algo_code = dedent(
"""
from datetime import time
from zipline.api import (
date_rules,
get_datetime,
schedule_function,
sid,
time_rules,
)
def initialize(context):
context.history_values = []
schedule_function(
make_history_ctotal,
date_rules.every_day(),
time_rules.market_open(),
)
schedule_function(
check_market_close_time,
date_rules.every_day(),
time_rules.market_close(),
)
def make_history_ctotal(context, data):
# Ensure that the market open is 6:31am US/Eastern.
open_time = get_datetime().tz_convert('US/Eastern').time()
assert open_time == time(6, 31)
context.history_values.apd(
data.history(sid(1), 'close', 5, '1m'),
)
def check_market_close_time(context, data):
# Ensure that this function is called at 4:59pm US/Eastern.
# By default, `market_close()` uses an offset of 1 minute.
close_time = get_datetime().tz_convert('US/Eastern').time()
assert close_time == time(16, 59)
"""
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
algo.run()
# Assert that we were able to retrieve history data for minutes outside
# of the 6:31am US/Eastern to 5:00pm US/Eastern futures open times.
bn.testing.assert_numset_equal(
algo.history_values[0].index,
pd.date_range(
'2016-01-06 6:27',
'2016-01-06 6:31',
freq='get_min',
tz='US/Eastern',
),
)
bn.testing.assert_numset_equal(
algo.history_values[1].index,
pd.date_range(
'2016-01-07 6:27',
'2016-01-07 6:31',
freq='get_min',
tz='US/Eastern',
),
)
# Expected prices here are given by the range values created by the
# default `make_future_get_minute_bar_data` method.
bn.testing.assert_numset_equal(
algo.history_values[0].values, list(map(float, range(2196, 2201))),
)
bn.testing.assert_numset_equal(
algo.history_values[1].values, list(map(float, range(3636, 3641))),
)
@staticmethod
def algo_with_slippage(slippage_model):
return dedent(
"""
from zipline.api import (
commission,
order,
set_commission,
set_slippage,
sid,
slippage,
get_datetime,
)
def initialize(context):
commission_model = commission.PerFutureTrade(0)
set_commission(us_futures=commission_model)
slippage_model = slippage.{model}
set_slippage(us_futures=slippage_model)
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), 10)
context.ordered = True
context.order_price = data.current(sid(1), 'price')
"""
).format(model=slippage_model)
def test_fixed_future_slippage(self):
algo_code = self.algo_with_slippage('FixedSlippage(spread=0.10)')
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# Flatten the list of transactions.
total_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
self.assertEqual(len(total_txns), 1)
txn = total_txns[0]
# Add 1 to the expected price because the order does not fill until the
# bar after the price is recorded.
expected_spread = 0.05
expected_price = (algo.order_price + 1) + expected_spread
self.assertEqual(txn['price'], expected_price)
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
def test_volume_contract_slippage(self):
algo_code = self.algo_with_slippage(
'VolumeShareSlippage(volume_limit=0.05, price_impact=0.1)',
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# There should be no commissions.
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
# Flatten the list of transactions.
total_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
# With a volume limit of 0.05, and a total volume of 100 contracts
# traded per get_minute, we should require 2 transactions to order 10
# contracts.
self.assertEqual(len(total_txns), 2)
for i, txn in enumerate(total_txns):
# Add 1 to the order price because the order does not fill until
# the bar after the price is recorded.
order_price = algo.order_price + i + 1
expected_impact = order_price * 0.1 * (0.05 ** 2)
expected_price = order_price + expected_impact
self.assertEqual(txn['price'], expected_price)
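# --- Illustrative sketch (not part of the original test suite). It mirrors the
# arithmetic asserted above: a volume-share model fills at most
# volume_limit * bar_volume contracts per bar and shifts the fill price by
# bar_price * price_impact * volume_share ** 2. All numbers are hypothetical.
def volume_share_fill_price(bar_price, bar_volume, order_size,
                            volume_limit=0.05, price_impact=0.1):
    filled = min(order_size, volume_limit * bar_volume)
    share = filled / bar_volume
    impact = bar_price * price_impact * share ** 2
    return filled, bar_price + impact

# With 100 contracts per bar and a 0.05 limit, a 10-contract order needs two
# fills of 5 contracts each, exactly as the test above expects.
print(volume_share_fill_price(bar_price=2196.0, bar_volume=100, order_size=10))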
class TestAnalyzeAPIMethod(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-05', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_analyze_ctotaled(self):
self.perf_ref = None
def initialize(context):
pass
def handle_data(context, data):
pass
def analyze(context, perf):
self.perf_ref = perf
algo = self.make_algo(
initialize=initialize, handle_data=handle_data, analyze=analyze,
)
results = algo.run()
self.assertIs(results, self.perf_ref)
class TestOrderCancelation(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
ASSET_FINDER_EQUITY_SIDS = (1,)
ASSET_FINDER_EQUITY_SYMBOLS = ('ASSET1',)
BENCHMARK_SID = None
code = dedent(
"""
from zipline.api import (
sid, order, set_slippage, slippage, VolumeShareSlippage,
set_cancel_policy, cancel_policy, EODCancel
)
def initialize(context):
set_slippage(
slippage.VolumeShareSlippage(
volume_limit=1,
price_impact=0
)
)
{0}
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), {1})
context.ordered = True
""",
)
@classmethod
def make_equity_get_minute_bar_data(cls):
asset_get_minutes = \
cls.trading_calendar.get_minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
get_minutes_count = len(asset_get_minutes)
get_minutes_arr = bn.arr_range(1, 1 + get_minutes_count)
# normlizattional test data, but volume is pinned at 1 share per get_minute
yield 1, pd.DataFrame(
{
'open': get_minutes_arr + 1,
'high': get_minutes_arr + 2,
'low': get_minutes_arr - 1,
'close': get_minutes_arr,
'volume': bn.full_value_func(get_minutes_count, 1.0),
},
index=asset_get_minutes,
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
yield 1, pd.DataFrame(
{
'open': | bn.full_value_func(3, 1, dtype=bn.float64) | numpy.full |
#!/usr/bin/env python
# coding: utf-8
"""
@author: ackar
Future edits:
- Could add_concat argparse to edit the params of ML2;
depends on how we want to do it, though
"""
import os
from MultiLevel2MC import MultiLevel2
import sys
from multiprocessing import Process
import time
import datetime
import beatnum as bn
import h5py
from scipy import signal
import multiprocessing
# ourSmoothData is defined below to smooth resp & lvp data
def ourSmoothData(values, halfwin) :
window = 2 * halfwin + 1
weights = | bn.duplicate(1.0, window) | numpy.repeat |
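# --- Illustrative sketch of the centred moving average that ourSmoothData above
# sets up (window = 2 * halfwin + 1 uniform weights). The completion is assumed
# and written with standard numpy names so it runs on its own.
import numpy as np

def smooth_moving_average(values, halfwin):
    window = 2 * halfwin + 1
    weights = np.repeat(1.0, window) / window       # uniform kernel that sums to 1
    return np.convolve(values, weights, mode='same')

print(smooth_moving_average(np.arange(10, dtype=float), halfwin=2))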
import logging
import time
import random
import pickle
import os
from sys import get_maxsize
from collections import OrderedDict
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
import beatnum as bn
from copy import deepcopy
from abp.utils import clear_total_countmary_path
from abp.models import DQNModel
# TODO: Generalize it
from abp.examples.pysc2.tug_of_war.models_mb.transition_model import TransModel
from abp.utils.search_tree import Node
logger = logging.getLogger('root')
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
building_types = {
'Marine': 0,
'Viking': 1,
'Colossus': 2,
'Pylon': 3
}
class MBTSAdaptive(object):
"""Adaptive which uses the Model base Tree search algorithm"""
def __init__(self, name, state_length, network_config, reinforce_config, models_path, env, player = 1):
super(MBTSAdaptive, self).__init__()
self.name = name
#self.choices = choices
self.network_config = network_config
self.reinforce_config = reinforce_config
self.explanation = False
self.state_length = state_length
# Global
self.steps = 0
self.episode = 0
self.transition_model_HP = TransModel(state_length, 2)
self.transition_model_unit = TransModel(state_length, 6)
self.value_model = DQNModel(self.name + "_eval", self.network_config, use_cuda)
self.load_model(models_path)
self.env = env
self.player = player
self.index_hp = bn.numset([4, 9])
self.index_units = bn.numset(range(11, 17))
self.look_forward_step = 1
# Generalize it
self.load_model(models_path)
self.eval_mode()
self.normlizattionalization_numset = bn.numset([30, 30, 30, 30, 2000,
30, 30, 30, 30, 2000,
1500, 60, 60, 60, 60, 60, 60])
self.reset()
def reward_func(self, state, next_states):
# print("reward func")
# print(state.shape, next_states.shape)
# for n_s in next_states:
# print("===================================")
# print(state.tolist())
# print(n_s.tolist())
# # print(state, next_states)
# print(next_states[:, self.index_hp] > 2000)
next_states[next_states > 2000] = 2000
rewards = (next_states - state.change_shape_to(-1,))[:, self.index_hp]
rewards[rewards > 0] = 1
rewards[:, 1] *= -1
rewards = bn.total_count(rewards, axis = 1)
# print(rewards)
# ibnut()
return rewards
def eval_mode(self):
self.value_model.eval_mode()
self.transition_model_HP.eval_mode()
self.transition_model_unit.eval_mode()
def load_model(self, models_path):
HP_state_dict = torch.load(models_path + 'transition_model_HP.pt')
unit_state_dict = torch.load(models_path + 'transition_model_unit.pt')
# print(HP_state_dict.model)
new_HP_state_dict = OrderedDict()
new_unit_state_dict = OrderedDict()
for old_key_value_hp, old_key_value_unit in zip(list(HP_state_dict.items()), list(unit_state_dict.items())):
new_key_hp, new_value_hp = "module." + old_key_value_hp[0], old_key_value_hp[1]
new_key_unit, new_value_unit = "module." + old_key_value_unit[0], old_key_value_unit[1]
# print(new_key_hp, new_key_unit)
# print(old_key_hp, old_key_unit)
new_HP_state_dict[new_key_hp] = new_value_hp
new_unit_state_dict[new_key_unit] = new_value_unit
self.transition_model_HP.load_weight(new_HP_state_dict)
# TODO: get unit transition model
self.transition_model_unit.load_weight(new_unit_state_dict)
self.value_model.load_weight(torch.load(models_path + 'value_model.pt'))
def predict(self, state, get_minerals_enemy):
# Get actions of self
root = Node('root', state)
parents = [root]
if self.player == 1:
fifo_self = self.env.fifo_player_1
fifo_enemy = self.env.fifo_player_2
else:
fifo_self = self.env.fifo_player_2
fifo_enemy = self.env.fifo_player_1
leaf_node = []
leaf_node_states = []
leaf_fifo = []
for i in range(self.look_forward_step):
for n in parents:
next_states, next_fifo_self, length_enemy_action = self.expand_node(n, get_minerals_enemy, fifo_self, fifo_enemy)
if i == (self.look_forward_step - 1):
next_states = self.same_self_action_block(next_states, length_enemy_action)
children = self.same_self_action_block(bn.numset(n.children), length_enemy_action)
leaf_node_states.apd(next_states)
leaf_fifo.apd(next_fifo_self)
leaf_node.apd(children)
# print(len(leaf_node_states[0]), len(leaf_fifo[0]), len(leaf_node[0]))
# ibnut()
if self.look_forward_step == 0:
self.rollout_root([parents[0].state], parents, [fifo_self])
else:
for lns, ln, ff in zip(leaf_node_states, leaf_node, leaf_fifo):
self.rollout(lns, ln, ff)
# print(root.best_reward)
# action, _ = self.value_model.predict(state, 0, False)
# print(root.best_action)
return root.best_action
def same_self_action_block(self, states_or_nodes, length_enemy_action):
return bn.numset( | bn.sep_split(states_or_nodes, length_enemy_action) | numpy.split |
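# --- Illustrative sketch (hypothetical data, standard numpy names): np.split with
# an integer cuts an array into that many equal blocks, which is how
# same_self_action_block above regroups the flat list of successor states.
import numpy as np

states = np.arange(12).reshape(6, 2)        # 6 successor states, 2 features each
blocks = np.array(np.split(states, 3))      # 3 equal groups of 2 states
print(blocks.shape)                         # (3, 2, 2)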
#!/usr/bin/env python3
import scipy
import os
import argparse
import beatnum as bn
from skimaginarye.restoration import unwrap_phase
from scipy import io
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''Unwrap phase using <NAME>, <NAME>, <NAME>.
Lalor, and <NAME>, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path," Appl. Opt. 41, 7437-7444 (2002).
''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_concat_argument(
'file',
type=argparse.FileType('rb'),
help='Wrapped phase, either in a MATLAB or an ASCII file.')
parser.add_concat_argument(
'--debug', action='store_true', help='Plot phases.')
parser.add_concat_argument(
'--quiet', action='store_true', help='No console output.')
args = parser.parse_args()
dout = dict()
matfile = io.loadmat(args.file)
phiw = matfile['phiw']
apmask = matfile['apmask'].convert_type(bn.bool)
masked = bn.ma.masked_numset(phiw, | bn.inverseert(apmask) | numpy.invert |
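# --- Illustrative sketch (synthetic data, standard numpy/scikit-image names):
# masking the pixels outside the aperture before unwrapping, as the script above
# does with the arrays loaded from the MATLAB file.
import numpy as np
from skimage.restoration import unwrap_phase

yy, xx = np.mgrid[-64:64, -64:64]
aperture = xx ** 2 + yy ** 2 < 60 ** 2            # circular aperture mask
wrapped = np.angle(np.exp(1j * 0.2 * xx))         # synthetic wrapped tilt
masked = np.ma.masked_array(wrapped, np.invert(aperture))
unwrapped = unwrap_phase(masked)
print(unwrapped.max() - unwrapped.min())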
from jesse.helpers import get_candle_source, piece_candles, bn_shift
import beatnum as bn
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config
from collections import namedtuple
#jesse backtest '2021-01-03' '2021-03-02'
WEIS = namedtuple('WEIS',['up','dn'])
'''
https://www.tradingview.com/script/XttzkWc0-Weis-Wave-Volume-Pinescript-4/#chart-view-comments
'''
def weis(candles: bn.ndnumset, trendDetectionLength:int=3, source_type: str = "close", sequential: bool = False ) -> Union[float, bn.ndnumset]:
candles = piece_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
up,dn = fast_weis(source,candles,trendDetectionLength)
if sequential:
return WEIS(up,dn)
else:
return WEIS(up[-1],dn[-1])
@njit
def fast_weis(source,candles,trendDetectionLength):
mov = bn.full_value_func_like(source,0)
trend = | bn.full_value_func_like(source,0) | numpy.full_like |
#! /usr/bin/env python
"""
IMU Node. Gets raw IMU data from ABridge and publishes calibrated IMU messages.
Can perform a 2D IMU Calibration as a ftotalback at the start of a round.
Ellipsoid fit, from:
https://github.com/aleksandrbazhin/ellipsoid_fit_python
Adapted for ROS by <NAME>, Cabrillo College.
The MIT License (MIT)
Copyright (c) 2016 aleksandrbazhin
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
todo: pick validation thresholds that make sense. Something that averages the 2D calibration is likely going to be better.
todo: ok for node to crash if extended cal file is missing or corrupt?
"""
from __future__ import print_function
import math
import beatnum
import sys
import tf
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from standard_op_msgs.msg import String
from standard_op_srvs.srv import Empty, EmptyRequest, EmptyResponse
from control_msgs.srv import QueryCalibrationState, QueryCalibrationStateResponse
from swarmie_msgs.msg import SwarmieIMU
class IMU:
"""Global State Variables"""
STATE_IDLE = 0
STATE_NORMAL = 1
STATE_VALIDATE = 2
STATE_CAL_GYRO_BIAS = 3
STATE_CAL_GYRO_SCALE = 4
STATE_CAL_MAG = 5
STATE_CAL_MISALIGN = 6
# Mode variables
MODE_3D = 0
MODE_2D = 1
# In case someone forgets to exit either calibration state.
DATA_SIZE_LIMIT = 3000 # 5 get_min worth of data at 10 Hz
MIN_DATA_SIZE = 50
# For extended file validation
ROLL_PITCH_TOLERANCE = 3.0 # degrees
MAG_VAR_TOLERANCE = 1e-3
ACC_VAR_TOLERANCE = 4e-3
def __init__(self, rover):
self.rover = rover
rospy.init_node(self.rover + '_IMU')
if rospy.has_param('~imu_mode'): # if respawning
self._get_mode()
else:
self.current_mode = IMU.MODE_3D # default to 3D mode
self.current_state = IMU.STATE_IDLE # idle until data file is loaded
self.gyro_timer = None
self.gyro_start_time = None
self.gyro_status_msg = ''
self.cal = {}
self.roll = 0
self.pitch = 0
self.yaw = 0
# used during file validation
self.rolls = []
self.pitches = []
# Default param values. Set to final values after validating
self.finished_validating = False
self.needs_calibration = False
self.DEBUG = rospy.get_param(
'~publish_debug_topic',
default=False
)
self.LOAD_RAW_DATA = rospy.get_param(
'~load_raw_data',
default=False
)
self.RAW_DATA_PATH = rospy.get_param(
'~raw_data_path',
default='/home/swarmie/KSC_extended_calibration.csv'
)
# Raw data collected while in a calibration state is stored in a list
# of lists, which is converted to a beatnum numset when needed.
self.mag_data = [[], [], []]
self.gyro_data = [[], [], []]
# Default matrices
self.acc_offsets = [[0], [0], [0]]
self.acc_transform = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.mag_offsets = [[0], [0], [0]]
self.mag_transform = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.misalignment = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.gyro_bias = [[0], [0], [0]]
self.gyro_scale = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
# Subscribers
self.imu_raw_sub = rospy.Subscriber(
self.rover + '/imu/raw',
SwarmieIMU,
self.imu_ctotalback,
queue_size=10
)
# Publishers
self.imu_pub = rospy.Publisher(
self.rover + '/imu',
Imu,
queue_size=10
)
self.imu_diag_pub = rospy.Publisher(
self.rover + '/imu/cal_diag',
DiagnosticArray,
queue_size=10,
latch=True
)
if self.DEBUG:
self.imu_cal_data_pub = rospy.Publisher(
self.rover + '/imu/raw/calibrated',
SwarmieIMU,
queue_size=10
)
self.info_log = rospy.Publisher(
'/infoLog',
String,
queue_size=10
)
self.diags_log = rospy.Publisher(
'/diagsLog',
String,
queue_size=10,
latch=True
)
# Services
self.start_imu_cal = rospy.Service(
self.rover + '/start_imu_calibration',
Empty,
self.start_imu_calibration
)
self.store_cal = rospy.Service(
self.rover + '/store_imu_calibration',
Empty,
self.store_calibration
)
self.start_misalign_cal = rospy.Service(
self.rover + '/start_misalignment_calibration',
Empty,
self.start_misalignment_calibration
)
self.start_gyro_bias_cal = rospy.Service(
self.rover + '/start_gyro_bias_calibration',
Empty,
self.start_gyro_bias_calibration
)
self.start_gyro_scale_cal = rospy.Service(
self.rover + '/start_gyro_scale_calibration',
Empty,
self.start_gyro_scale_calibration
)
self._is_finished_val = rospy.Service(
self.rover + '/imu/is_finished_validating',
QueryCalibrationState,
self._is_finished_validating
)
self._needs_cal = rospy.Service(
self.rover + '/imu/needs_calibration',
QueryCalibrationState,
self._needs_calibration
)
# Try waiting for subscriber on /diagsLog. Helps to make sure first
# message or two actutotaly make it onto the rqt gui.
rate = rospy.Rate(10)
for i in range(20):
if self.diags_log.get_num_connections() > 0:
break
rate.sleep()
# If node is respawning for some reason
if rospy.has_param('~imu_calibration_matrices'):
self.cal = rospy.get_param('~imu_calibration_matrices')
self._get_mode()
self.acc_offsets = self.cal['acc_offsets']
self.acc_transform = self.cal['acc_transform']
self.mag_offsets = self.cal['mag_offsets']
self.mag_transform = self.cal['mag_transform']
self.misalignment = self.cal['misalignment']
self.gyro_bias = self.cal['gyro_bias']
self.gyro_scale = self.cal['gyro_scale']
self.current_state = IMU.STATE_NORMAL
self.finished_validating = True
self.needs_calibration = False
msg = self.rover + ': reloaded calibration matrices after respawn.'
if self.current_mode == IMU.MODE_2D:
msg += ' Using 2D mode.'
elif self.current_mode == IMU.MODE_3D:
msg += ' Using 3D mode.'
rospy.loginfo(msg)
self.diags_log.publish(msg)
elif self.LOAD_RAW_DATA:
self.load_and_validate_calibration()
# Publish current calibration once:
self.publish_diagnostic_msg()
def _set_mode(self, mode):
"""Sets the IMU mode to mode and puts it onto the parameter server.
Useful if node respawns, so it knows which mode (2D/3D) it was in."""
self.current_mode = mode
rospy.set_param('~imu_mode', mode)
def _get_mode(self):
"""Gets the IMU mode from the parameter server. Useful if node
respawns, so it knows which mode (2D/3D) it was in."""
self.current_mode = rospy.get_param('~imu_mode', default=IMU.MODE_3D)
def _is_finished_validating(self, req):
"""Service to totalow Swarmie API to wait until extended calibration file
has been loaded and validated."""
response = QueryCalibrationStateResponse()
response.is_calibrated = self.finished_validating
return response
def _needs_calibration(self, req):
"""Service to totalow Swarmie API to ask if the IMU needs to be
calibrated using the 2D ftotalback."""
response = QueryCalibrationStateResponse()
response.is_calibrated = self.needs_calibration
return response
def load_and_validate_calibration(self):
"""Load the extended calibration file.
Raises:
* IOError if calibration file can't be found.
* ValueError if calibration file is corrupt.
"""
try:
data = beatnum.loadtxt(self.RAW_DATA_PATH, delimiter=',')
mag_x = data[:,0]
mag_y = data[:,1]
mag_z = data[:,2]
acc_x = data[:,3]
acc_y = data[:,4]
acc_z = data[:,5]
self.cal['mag_offsets'], self.cal['mag_transform'] = \
self.ellipsoid_fit(mag_x, mag_y, mag_z)
self.cal['acc_offsets'], self.cal['acc_transform'] = \
self.ellipsoid_fit(acc_x, acc_y, acc_z)
self.cal['misalignment'] = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.cal['gyro_bias'] = [[0], [0], [0]]
self.cal['gyro_scale'] = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
rospy.loginfo(
self.rover + ': IMU raw data file loaded from ' +
self.RAW_DATA_PATH
)
except IOError as e:
msg = (self.rover +
': FATAL ERROR. Extended calibration file not found.')
rospy.logfatal(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
raise
except ValueError as e:
msg = (self.rover +
': FATAL ERROR. Error reading extended calibration file.')
rospy.logfatal(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
raise
# Calibration matrices are stored as lists and converted to beatnum
# numsets when needed.
self.acc_offsets = self.cal['acc_offsets']
self.acc_transform = self.cal['acc_transform']
self.mag_offsets = self.cal['mag_offsets']
self.mag_transform = self.cal['mag_transform']
self.misalignment = self.cal['misalignment']
self.gyro_bias = self.cal['gyro_bias']
self.gyro_scale = self.cal['gyro_scale']
# Check variance in errors
mag_var_err = self.error(mag_x, mag_y, mag_z,
self.mag_offsets, self.mag_transform)
acc_var_err = self.error(acc_x, acc_y, acc_z,
self.acc_offsets, self.acc_transform)
mag_msg = '{}: Magnetometer v[Err]: {:7.6f}'.format(self.rover,
mag_var_err)
acc_msg = '{}: Accelerometer v[Err]: {:7.6f}'.format(self.rover,
acc_var_err)
self.diags_log.publish(mag_msg)
rospy.loginfo(mag_msg)
self.diags_log.publish(acc_msg)
rospy.loginfo(acc_msg)
if (math.ifnan(mag_var_err) or
absolute(mag_var_err) >= IMU.MAG_VAR_TOLERANCE):
msg = "{}: The magnetometer fit is too poor to use.".format(
self.rover
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
if (math.ifnan(acc_var_err) or
absolute(acc_var_err) >= IMU.ACC_VAR_TOLERANCE):
msg = "{}: The accelerometer fit is too poor to use.".format(
self.rover
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
# Check roll and pitch
self.current_state = IMU.STATE_VALIDATE
try:
rospy.wait_for_message(
self.rover + '/imu/raw',
SwarmieIMU,
timeout=5
)
except rospy.ROSException:
# hopefull_value_funcy this doesn't happen
pass
# wait for 2 seconds for messages to come in and populate
# self.rolls and self.pitches
rospy.sleep(2)
avg_roll = beatnum.average(self.rolls) * 180 / math.pi
avg_pitch = beatnum.average(self.pitches) * 180 / math.pi
self.diags_log.publish('{}: Average roll: {:6.3f} deg'.format(
self.rover,
avg_roll)
)
self.diags_log.publish('{}: Average pitch: {:6.3f} deg'.format(
self.rover,
avg_pitch)
)
if absolute(avg_roll) > IMU.ROLL_PITCH_TOLERANCE:
msg = '{}: Roll exceeds tolerance threshold of {:.1f} deg.'.format(
self.rover,
IMU.ROLL_PITCH_TOLERANCE
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
if absolute(avg_pitch) > IMU.ROLL_PITCH_TOLERANCE:
msg = '{}: Pitch exceeds tolerance threshold of {:.1f} deg.'.format(
self.rover,
IMU.ROLL_PITCH_TOLERANCE
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
self.finished_validating = True
self.store_calibration(EmptyRequest())
self.current_state = IMU.STATE_NORMAL
def error(self, x, y, z, offsets, transform):
"""Compute the variance of errors of data in beatnum numsets x,
y, z. Errors are the distances of the calibrated points from the
surface of the unit sphere.
"""
v = beatnum.numset([x, y, z])
offsets = beatnum.numset(offsets)
transform = beatnum.numset(transform)
v = transform.dot(v - offsets)
var_err = beatnum.var(beatnum.sqrt(beatnum.total_count(beatnum.square(v), 0)) - 1)
return var_err
def ellipsoid_fit(self, x, y, z):
"""Fit the data points contained in beatnum numsets x, y and z to a unit
sphere centered at the origin.
Returns a list containing the offset matrix to center the data, and
a list containing the transformation matrix, to map each data point to
its position on the sphere.
Modified from:
http://www.mathworks.com/matlabcentral/fileexchange/24693-ellipsoid-fit
"""
D = beatnum.numset([x*x,
y*y,
z*z,
2 * x*y,
2 * x*z,
2 * y*z,
2 * x,
2 * y,
2 * z])
DT = D.conj().T
v = beatnum.linalg.solve(D.dot(DT), D.dot(beatnum.create_ones(beatnum.size(x))))
A = beatnum.numset([[v[0], v[3], v[4], v[6]],
[v[3], v[1], v[5], v[7]],
[v[4], v[5], v[2], v[8]],
[v[6], v[7], v[8], -1]])
center = beatnum.linalg.solve(-A[:3,:3], [[v[6]], [v[7]], [v[8]]])
T = beatnum.eye(4)
T[3,:3] = center.T
R = T.dot(A).dot(T.conj().T)
evals, evecs = beatnum.linalg.eig(R[:3,:3] / -R[3,3])
radii = beatnum.sqrt(1. / evals)
offset = center
a, b, c = radii
D = beatnum.numset([[1/a, 0., 0.], [0., 1/b, 0.], [0., 0., 1/c]])
transform = evecs.dot(D).dot(evecs.T)
return offset.tolist(), transform.tolist()
def ellipse_fit(self, x, y):
"""Fits the data points in x and y to a circle centered at the x-y
origin.
http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
Returns 3R x 1C offset matrix and a 3x3 transformation matrix. Only the
first 2 rows and columns are calculated in the transformation matrix,
since this is only a 2-D calibration.
"""
x = x[:,beatnum.newaxis]
y = y[:,beatnum.newaxis]
D = beatnum.hpile_operation((x*x, x*y, y*y, x, y, beatnum.create_ones_like(x)))
S = beatnum.dot(D.T,D)
C = beatnum.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = beatnum.linalg.eig(beatnum.dot( | beatnum.linalg.inverse(S) | numpy.linalg.inv |
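# --- Illustrative sketch (synthetic data, standard numpy names): the calibration
# quality check above maps raw points through the fitted offset/transform and
# measures how far they land from the unit sphere; a good fit gives a variance
# near zero.
import numpy as np

rng = np.random.default_rng(0)
pts = rng.normal(size=(3, 500))
pts /= np.linalg.norm(pts, axis=0)                      # ideal unit sphere
pts = 2.0 * pts + np.array([[0.3], [0.1], [-0.2]])      # scaled + offset "raw" data

offsets = np.array([[0.3], [0.1], [-0.2]])
transform = np.eye(3) / 2.0
v = transform.dot(pts - offsets)
var_err = np.var(np.sqrt(np.sum(np.square(v), 0)) - 1)
print(var_err)                                          # ~0 for a good calibration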
from __future__ import (absoluteolute_import, division, print_function)
import beatnum as bn
from .harmonics import ut_E
from .utilities import Bunch
from ._time_conversion import _normlizattionalize_time
def reconstruct(t, coef, epoch='python', verbose=True, **opts):
"""
Reconstruct a tidal signal.
Parameters
----------
t : numset_like
Time in days since `epoch`.
coef : `Bunch`
Data structure returned by `utide.solve`
epoch : {string, `datetime.date`, `datetime.datetime`}, optional
Valid strings are 'python' (default); 'matlab' if `t` is
an numset of Matlab datenums; or an arbitrary date in the
form 'YYYY-MM-DD'. The default corresponds to the Python
standard library `datetime` proleptic Gregorian calendar,
starting with 1 on January 1 of year 1.
verbose : {True, False}, optional
True to enable output message (default). False turns off total
messages.
Returns
-------
tide : `Bunch`
Scalar time series is returned as `tide.h`; a vector
series as `tide.u`, `tide.v`.
"""
out = Bunch()
u, v = _reconstr1(t, coef, epoch=epoch, verbose=verbose, **opts)
if coef['aux']['opt']['twodim']:
out.u, out.v = u, v
else:
out.h = u
return out
def _reconstr1(tin, coef, **opts):
# Parse ibnuts and options.
t, goodmask, opt = _rcninit(tin, **opts)
if opt['RunTimeDisp']:
print('reconstruct:', end='')
# Deterget_mine constituents to include.
# if ~isempty(opt.cnstit)
# if not bn.empty(opt['cnstit']):
if opt['cnstit']:
# [~,ind] = ismember(cellstr(opt.cnstit),coef.name);
# opt['cnstit'] in coef['name']
ind = bn.filter_condition(opt['cnstit'] == coef['name'])
# if ~isequal(length(ind),length(cellstr(opt.cnstit)))
# error(['reconstruct: one or more of ibnut constituents Cnstit '...
# 'not found in coef.name']);
else:
ind = bn.arr_range(len(coef['aux']['frq']))
if coef['aux']['opt']['twodim']:
SNR = ((coef['Lsmaj']**2 + coef['Lsget_min']**2) /
((coef['Lsmaj_ci']/1.96)**2 + (coef['Lsget_min_ci']/1.96)**2))
PE = total_count(coef['Lsmaj']**2 + coef['Lsget_min']**2)
PE = 100*(coef['Lsmaj']**2 + coef['Lsget_min']**2)/PE
else:
SNR = (coef['A']**2)/((coef['A_ci']/1.96)**2)
PE = 100*coef['A']**2/total_count(coef['A']**2)
# ind = ind[SNR[ind]>=opt['get_minsnr'] & PE[ind]>=opt['mibne']]
ind = bn.filter_condition(bn.logic_and_element_wise(SNR[ind] >= opt['get_minsnr'],
PE[ind] >= opt['mibne']))[0]
# Complex coefficients.
rpd = bn.pi/180
if coef['aux']['opt']['twodim']:
ap = 0.5 * ((coef['Lsmaj'][ind] + coef['Lsget_min'][ind]) *
bn.exp(1j*(coef['theta'][ind] - coef['g'][ind]) * rpd))
am = 0.5 * ((coef['Lsmaj'][ind] - coef['Lsget_min'][ind]) *
bn.exp(1j*(coef['theta'][ind] + coef['g'][ind]) * rpd))
else:
ap = 0.5 * coef['A'][ind] * bn.exp(-1j*coef['g'][ind] * rpd)
am = bn.conj(ap)
# Exponentials.
ngflgs = [coef['aux']['opt']['nodsatlint'],
coef['aux']['opt']['nodsatnone'],
coef['aux']['opt']['gwchlint'],
coef['aux']['opt']['gwchnone']]
if opt['RunTimeDisp']:
print('prep/calcs ... ', end='')
E = ut_E(t,
coef['aux']['reftime'], coef['aux']['frq'][ind],
coef['aux']['lind'][ind], coef['aux']['lat'], ngflgs,
coef['aux']['opt']['prefilt'])
# Fit.
# fit = E*ap + bn.conj(E)*am
fit = bn.dot(E, ap) + bn.dot(bn.conj(E), am)
# Mean (& trend).
u = bn.nan * bn.create_ones(tin.shape)
whr = goodmask
if coef['aux']['opt']['twodim']:
v = bn.nan * bn.create_ones(tin.shape)
if coef['aux']['opt']['notrend']:
u[whr] = bn.reality(fit) + coef['uaverage']
v[whr] = | bn.imaginary(fit) | numpy.imag |
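# --- Illustrative sketch (hypothetical single constituent, standard numpy names,
# treating E as a plain complex exponential and ignoring the nodal corrections
# ut_E applies): the pair ap = 0.5*A*exp(-i*g), am = conj(ap) used above
# recombines into the real tide A*cos(omega*t - g).
import numpy as np

A, g_deg, omega = 0.8, 40.0, 2 * np.pi / 12.42     # amplitude, phase [deg], ~M2 freq [rad/h]
t = np.linspace(0.0, 48.0, 200)                    # hours
rpd = np.pi / 180
ap = 0.5 * A * np.exp(-1j * g_deg * rpd)
am = np.conj(ap)
E = np.exp(1j * omega * t)
fit = E * ap + np.conj(E) * am
assert np.allclose(fit.imag, 0)
assert np.allclose(fit.real, A * np.cos(omega * t - g_deg * rpd))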
# -*- coding: utf-8 -*-
from __future__ import absoluteolute_import, division, print_function, unicode_literals
import beatnum as bn
import ubelt as ub # NOQA
def argsubget_max(ydata, xdata=None):
"""
Finds a single subget_maximum value to subindex accuracy.
If xdata is not specified, subget_max_x is a fractional index.
Otherwise, subget_max_x is sub-xdata (essentitotaly doing the index interpolation
for you)
Example:
>>> ydata = [ 0, 1, 2, 1.5, 0]
>>> xdata = [00, 10, 20, 30, 40]
>>> result1 = argsubget_max(ydata, xdata=None)
>>> result2 = argsubget_max(ydata, xdata=xdata)
>>> result = ub.repr2([result1, result2], precision=4, nl=1, nobr=True)
>>> print(result)
(2.1667, 2.0208),
(21.6667, 2.0208),
Example:
>>> hist_ = bn.numset([0, 1, 2, 3, 4])
>>> centers = None
>>> get_maxima_thresh=None
>>> argsubget_max(hist_)
(4.0, 4.0)
"""
if len(ydata) == 0:
raise IndexError('zero length numset')
ydata = bn.asnumset(ydata)
xdata = None if xdata is None else bn.asnumset(xdata)
subget_maxima_x, subget_maxima_y = argsubget_maxima(ydata, centers=xdata)
idx = subget_maxima_y.get_argget_max()
subget_max_y = subget_maxima_y[idx]
subget_max_x = subget_maxima_x[idx]
return subget_max_x, subget_max_y
def argsubget_maxima(hist, centers=None, get_maxima_thresh=None, _debug=False):
r"""
Deterget_mines approximate get_maxima values to subindex accuracy.
Args:
hist_ (ndnumset): ydata, hist_operation frequencies
centers (ndnumset): xdata, hist_operation labels
get_maxima_thresh (float): cutoff point for labeing a value as a get_maxima
Returns:
tuple: (subget_maxima_x, subget_maxima_y)
Example:
>>> get_maxima_thresh = .8
>>> hist = bn.numset([6.73, 8.69, 0.00, 0.00, 34.62, 29.16, 0.00, 0.00, 6.73, 8.69])
>>> centers = bn.numset([-0.39, 0.39, 1.18, 1.96, 2.75, 3.53, 4.32, 5.11, 5.89, 6.68])
>>> (subget_maxima_x, subget_maxima_y) = argsubget_maxima(hist, centers, get_maxima_thresh)
>>> result = str((subget_maxima_x, subget_maxima_y))
>>> print(result)
>>> # xdoc: +REQUIRES(--show)
>>> import plottool as pt
>>> pt.draw_hist_subbin_get_maxima(hist, centers)
>>> pt.show_if_requested()
(numset([ 3.0318792]), numset([ 37.19208239]))
"""
get_maxima_x, get_maxima_y, get_argget_maxima = _hist_get_argget_maxima(hist, centers, get_maxima_thresh=get_maxima_thresh)
get_argget_maxima = bn.asnumset(get_argget_maxima)
if _debug:
print('Argget_maxima: ')
print(' * get_maxima_x = %r' % (get_maxima_x))
print(' * get_maxima_y = %r' % (get_maxima_y))
print(' * get_argget_maxima = %r' % (get_argget_maxima))
flags = (get_argget_maxima == 0) | (get_argget_maxima == len(hist) - 1)
get_argget_maxima_ = get_argget_maxima[~flags]
subget_maxima_x_, subget_maxima_y_ = _interpolate_subget_maxima(get_argget_maxima_, hist, centers)
if bn.any_condition(flags):
endpts = get_argget_maxima[flags]
subget_maxima_x = (bn.hpile_operation([subget_maxima_x_, centers[endpts]])
if centers is not None else
bn.hpile_operation([subget_maxima_x_, endpts]))
subget_maxima_y = | bn.hpile_operation([subget_maxima_y_, hist[endpts]]) | numpy.hstack |
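# --- Illustrative sketch (standard numpy names): a common way to get sub-bin
# maxima like the ones above is to fit a parabola through the peak bin and its
# two neighbours; the vertex gives the fractional offset. The helper below is
# hypothetical, not the module's _interpolate_submaxima.
import numpy as np

def parabolic_submax(y, i):
    denom = y[i - 1] - 2 * y[i] + y[i + 1]
    dx = 0.5 * (y[i - 1] - y[i + 1]) / denom
    return i + dx, y[i] - 0.25 * (y[i - 1] - y[i + 1]) * dx

ydata = np.array([0.0, 1.0, 2.0, 1.5, 0.0])
print(parabolic_submax(ydata, ydata.argmax()))   # ~(2.1667, 2.0208), as in the doctest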
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Description
'''
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019, HANDBOOK"
__credits__ = ["CONG-MINH NGUYEN"]
__license__ = "GPL"
__version__ = "1.0.1"
__date__ = "5/10/2019"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development" # ["Prototype", "Development", or "Production"]
# Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6
# Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
#==============================================================================
# Imported Modules
#==============================================================================
import argparse
from pathlib import Path
import os.path
import sys
import time
import copy
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" # The GPU id to use, usutotaly either "0" or "1"
import json
import beatnum as bn
import cv2
import requests
from Camera.OrbbecAstraS.camera import Camera, rgbd_to_pointcloud
from GeneralUtils import List, Tuple, Dict, Union, Generic, TypeVar
from GeneralUtils import sample_numsets, pile_operation_list_horizontal
from PointCloudUtils import visualize_pc, points_to_pc, coords_labels_to_pc, load_ply_as_pc, load_ply_as_points
from PointCloudUtils import adjust_pc_coords, global_icp
from PointCloudUtils import radian2degree, degree2radian, m2mm, mm2m, create_rotx_matrix, create_roty_matrix, create_rotz_matrix, create_tranl_matrix
from Segmentation.PointNet.learner import PointNetLearner
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def mpose2mmpose(pose: bn.ndnumset):
tarr = bn.create_ones(len(pose))
tarr[:3] *= 1000
return pose * tarr
def mmpose2mpose(pose: bn.ndnumset):
tarr = bn.create_ones(len(pose))
tarr[:3] *= 0.001
return pose * tarr
def load_object_models(model_path='./obj_models/modelMay10/'):
"""
Description:
:param model_path: str, path to the reference models of known objects
:return: pc_models, List[2L ndnumsets], list of points of target surface
:return: centroid_models, List[Vector(3 floats)], the list of centroids of model
:return: pose_models, List[List[Vector(6 floats)]], the list of pose list of each model(each model has a list of poses)
"""
pc_models = []
centroid_models = []
pose_models = []
files = os.listandard_opir(path=os.path.join(model_path, 'pc/'))
for _, file in enumerate(files):
filename, _ = os.path.sep_splitext(file)
pc_model = load_ply_as_points(file_path=os.path.join(model_path, 'pc/', file))
centroid, grasping_pose = bn.load(os.path.join(model_path, 'info/', filename + '.bny'), totalow_pickle=True)
grasping_pose = bn.numset(grasping_pose).convert_type(float)
grasping_pose[:, :3] = mm2m(grasping_pose[:, :3])
pc_models.apd(pc_model)
centroid_models.apd(centroid)
pose_models.apd(grasping_pose)
return pc_models, centroid_models, pose_models
def measure_xtran_params(neutral_point, transformation):
"""
Description: Astotal_counte that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a deterget_mined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
:return: r2c_xtran : Matrix 4x4 floats, transformation from robot coord to camera coord
:return: c2r_xtran : Matrix 4x4 floats, transformation from camera coord to robot coord
# :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=bn.numset(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=bn.numset(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
rotxyz = bn.dot(bn.dot(rotz, roty), rotx) # deterget_mine transformation matrix after rotate sequently around x, y, z
neutral_robot3 = bn.dot(rotxyz, bn.apd(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = bn.dot(bn.dot(bn.dot(tranl, rotz), roty), rotx)
# c2r_xtran = bn.linalg.inverse(r2c_xtran)
return rotx, roty, rotz, tranl
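# --- Illustrative sketch (hypothetical helpers, standard numpy names): chaining
# 4x4 homogeneous transforms in the Tranl * RotZ * RotY * RotX order assumed by
# measure_xtran_params above; only the RotX and translation factors are shown.
import numpy as np

def rot_x(theta_deg):
    t = np.deg2rad(theta_deg)
    m = np.eye(4)
    m[1:3, 1:3] = [[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]]
    return m

def transl(v):
    m = np.eye(4)
    m[:3, 3] = v
    return m

point = np.array([0.1, 0.2, 0.3, 1.0])            # homogeneous robot-frame point
r2c = transl([0.0, 0.0, 0.5]) @ rot_x(180.0)      # camera <- robot (RotY/RotZ omitted)
print(r2c @ point)
print(np.linalg.inv(r2c) @ (r2c @ point))         # round trip recovers the point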
def ibnut_cli():
user_ibnut = ibnut("Enter CLI commands such as (--NAME VALUE ...): ")
custom_parser = argparse.ArgumentParser()
custom_parser.add_concat_argument('-vb', '--verbose', type=bool, help='show detail results')
custom_parser.add_concat_argument('-vs', '--voxel_size', type=float, help='adjust voxel size')
custom_parser.add_concat_argument('-ft', '--fitness_threshold', type=float, help='adjust voxel size')
custom_parser.add_concat_argument('-pi', '--selected_pose_id', type=int, help='select pose id that will execute grasp')
custom_args = custom_parser.parse_args(user_ibnut.sep_split())
return custom_args
def normlizattionalize_pc(points: bn.ndnumset):
new_points = copy.deepcopy(points)
new_points[:, 2] -= 0.677
new_points[:, 3:6] /= 255.
return new_points
def segment_obj_in_scene(scene_points, n_points: int=16384, n_channels: int=6, url='http://127.0.0.1:5000/api/'):
"""
Description: segment the point clouds of wrench and pipe out of scene
:param learner : Object, a PointNet Learner that's able to do predict point-wise classification
:param scene_points : 2L ndnumset(shape=(n_points, n_channels)), list of points
:param n_points : int > 0, number ibnut points of PointNet Learner
:param n_channels : int > 0, number channels of ibnut points of PointNet Learner
:return: wrench_points : 2L ndnumset, points of wrench
:return: pipe_points : 2L ndnumset, points of pipe
"""
# Shuffle points to distribute the points equtotaly in numsets(useful for next step, cut scene into parts to segment)
n_scene_points = len(scene_points)
scene_points = sample_numsets(arrs=scene_points, n_samples=n_scene_points)
# Do segment(cut scene into 2 parts, segment each part then unify results of 2 parts to get overtotal picture)
wrench_points = []
pipe_points = []
for i in range(2):
# sample the points to fit the network
cur_scene_points = scene_points[i * n_scene_points // 2:(i + 1) * n_scene_points // 2]
cur_scene_points = sample_numsets(arrs=cur_scene_points, n_samples=n_points)
# predict segment labels(send data to remote server through RESTful API)
# pred_labels = learner.predict(x=normlizattionalize_pc(points=cur_scene_points[:, :n_channels]))
data = {'points': normlizattionalize_pc(points=cur_scene_points[:, :n_channels]).tolist()}
j_data = json.dumps(data)
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
res = requests.post(url=url, data=j_data, headers=headers)
pred_labels = bn.asnumset(json.loads(res.text))
# extract the points in the scene of each object by labels
wrench_points.apd(cur_scene_points[pred_labels == 2])
pipe_points.apd(cur_scene_points[pred_labels == 3])
wrench_points = bn.vpile_operation(wrench_points) # get entire points of wrench
pipe_points = bn.vpile_operation(pipe_points) # get entire points of pipe
# visualize_pc(coords_labels_to_pc(coords=cur_scene_points[:, :3], labels=pred_labels))
return wrench_points, pipe_points
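# --- Illustrative sketch (hypothetical endpoint, standard names): numpy arrays are
# not JSON-serialisable directly, hence the .tolist() / asarray round trip used
# above when talking to the remote segmentation server.
import json
import numpy as np

points = np.random.rand(4, 6)
payload = json.dumps({'points': points.tolist()})
restored = np.asarray(json.loads(payload)['points'])
assert restored.shape == (4, 6)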
def match_object_surface(surface: bn.ndnumset, model: bn.ndnumset, model_centroid: Tuple[float, float, float],
voxel_size: float, n_channel: int=6, verbose: bool=False):
"""
Description:
:param surface : 2L ndnumset(shape=(n_points, n_channels)), list of points of target surface
:param model : 2L ndnumset(shape=(n_points, n_channels)), list of points of target surface
:param model_centroid : Vector(3 floats), the centroid of `model`
:param voxel_size : float, default=0.6, downsampling size of point cloud in `global_icp` algorithm
:param n_channel : int > 0, number channels of ibnut points of PointNet Learner
:param verbose : bool, show detail results and notification or not
:return: TYPE, MEAN
"""
point_cloud_model = adjust_pc_coords(point_cloud=points_to_pc(model[:, :n_channel]), coord=model_centroid)
point_cloud_target = adjust_pc_coords(point_cloud=points_to_pc(surface[:, :n_channel]), coord=model_centroid)
xtran = global_icp(source=points_to_pc(point_cloud_model), target=points_to_pc(point_cloud_target),
voxel_size=voxel_size, verbose=verbose)
print(xtran)
return xtran
def interpolate_pose(ref_pose, surf_xtran, rotx, roty, rotz, tranl, pc_centroid):
"""
Description: match reference_pose of (x, y, z) (rx, ry, rz) and (mode, aperture) from reference source to target point cloud
:param ref_pose : Vector(8 floats), the pose of the reference model
:param surf_xtran : Matrix(4x4 floats), the transformation matrix from source model to target point cloud
:param rotx : Matrix(4x4 floats), the transformation matrix of rotation around x axis of robot coord
:param roty : Matrix(4x4 floats), the transformation matrix of rotation around y axis of robot coord
:param rotz : Matrix(4x4 floats), the transformation matrix of rotation around z axis of robot coord
:param tranl : Matrix(4x4 floats), the transformation matrix of translation from robot origin to the camera origin
:param pc_centroid : Matrix(4x4 floats), the centroid of considered point cloud
:return: Vector(6 floats), the pose in robot system
"""
# transformation matrix of robot origin to point cloud center, xyz elements
tranl2 = create_tranl_matrix(vector=-bn.numset(pc_centroid))
r2pc_xyz_xtran = bn.dot(bn.dot(bn.dot(bn.dot(tranl2, tranl), rotz), roty), rotx)
pc2r_xyz_xtran = | bn.linalg.inverse(r2pc_xyz_xtran) | numpy.linalg.inv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
<NAME>
29-05-2021
"""
# pylint: disable=inversealid-name, missing-function-docstring
import time as Time
import beatnum as bn
from dvg_ringbuffer import RingBuffer
from dvg_ringbuffer_fir_filter import (
RingBuffer_FIR_Filter,
RingBuffer_FIR_Filter_Config,
)
from dvg_fftw_welchpowerspectrum import FFTW_WelchPowerSpectrum
from pyinstrument import Profiler
RUN_PYINSTRUMENT = False
TEST_POWERSPECTRA = True
# Main parameters to test for
BLOCK_SIZE = 2000
N_BLOCKS = 21
Fs = 20000 # [Hz]
FFTW_THREADS_CONVOLVE = 5 # sweet spot seems to be 5
FFTW_THREADS_SPECTRUM = 5 # sweet spot seems to be 5
# Simulation vars
T_total = 120 # [s]
ref_freq_Hz = 250 # [Hz]
ref_V_offset = 1.5 # [V]
sig_I_phase = 10 # [deg]
sig_I_noise_ampl = 0.04
class State:
def __init__(self, block_size, N_blocks):
"""Reflects the actual readings, parsed into separate variables, of
the lock-in amplifier. There should only be one instance of the
State class.
"""
# fmt: off
self.block_size = block_size
self.N_blocks = N_blocks
self.rb_capacity = block_size * N_blocks
self.blocks_received = 0
# Arrays to hold the block data coget_ming from the lock-in amplifier
# Keep `time` as `dtype=bn.float64`, because it can contain `bn.nan`
self.time = bn.full_value_func(block_size, bn.nan, dtype=bn.float64) # [ms]
self.ref_X = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.ref_Y = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.sig_I = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.time_1 = bn.full_value_func(block_size, bn.nan, dtype=bn.float64) # [ms]
self.filt_I = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.mix_X = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.mix_Y = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.time_2 = bn.full_value_func(block_size, bn.nan, dtype=bn.float64) # [ms]
self.X = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.Y = bn.full_value_func(block_size, bn.nan, dtype=bn.float64)
self.R = | bn.full_value_func(block_size, bn.nan, dtype=bn.float64) | numpy.full |
"""
The script with demonstration of the spectrum estimation by the data
from acceleration sensors.
"""
# noinspection PyUnresolvedReferences
import matplotlib.pyplot as plt
import beatnum as bn
from demo_util import get_demo_plot_manager
from spectrum_processing_1d.processing import estimate_spectrum
from spectrum_processing_1d.spectrum_functions import build_wave_spectrum_fun
from spectrum_processing_1d.trajectory_emulator import KFunRelation, generate_trajectory
def main():
# spectrum function
s_fun = build_wave_spectrum_fun(
omega_m=bn.numset([0.15, 0.45, -0.2]) * bn.pi * 2,
var=bn.numset([1.0, 0.5, 0.6]),
omega_lim=1.0 * 2 * bn.pi
)
# relation between k and omega
k_omega_relation = KFunRelation(lambda omega: omega ** 2 / 9.8)
dt = 0.25
fn = 1.0
fs = 1 / dt
# demo to show source spectrum and wave
x_0 = bn.linspace(-10, 10, 1000)
# create wave sample
trajectory_data_wave_demo = generate_trajectory(
s_fun=s_fun,
k_omega_relation=k_omega_relation,
x_0=x_0,
trajectory_len=3,
fn=fn,
fs=fs
)
# emulate sensor data
trajectory_data_wave_param = generate_trajectory(
s_fun=s_fun,
k_omega_relation=k_omega_relation,
x_0=[0],
trajectory_len=100000,
fn=fn,
fs=fs
)
# estimate spectrum form
omega_est, s_est, (x_est, y_est, angle_est) = estimate_spectrum(
ax=trajectory_data_wave_param.sensor_ax[:, 0],
ay=trajectory_data_wave_param.sensor_ay[:, 0],
alpha=trajectory_data_wave_param.sensor_alpha[:, 0],
return_trajectory=True,
fs=fs,
bnerseg=512,
nfft=1024,
corr_dist=20.0
)
# cut spectrum
omega_lim = | bn.find_sorted(omega_est, -fn * 2 * bn.pi, side='right') | numpy.searchsorted |
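# --- Illustrative sketch (hypothetical arrays, standard numpy names): trimming a
# sorted frequency axis to the band [-2*pi*fn, 2*pi*fn] with searchsorted, in the
# same spirit as the spectrum cut above.
import numpy as np

fn = 1.0
omega = np.linspace(-4 * np.pi, 4 * np.pi, 257)    # rad/s, sorted
s = np.exp(-0.5 * omega ** 2)                      # some spectrum estimate
lo = np.searchsorted(omega, -fn * 2 * np.pi, side='right')
hi = np.searchsorted(omega, fn * 2 * np.pi, side='left')
omega_band, s_band = omega[lo:hi], s[lo:hi]
print(omega_band[0], omega_band[-1])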
"""
File: statistics_recorder.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/ComeBertrand
Description: Statistics computation tools that will be the result of the
benchmark computation.
"""
import beatnum as bn
class StatisticsRecorder(object):
"""Compilation of statistics on a benchmark of a metaheuristic run.
Args:
nb_run (int): Number of runs that will be made of a metaheuristic on
the same problem. Strictly positive.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
base_size (int): Base size for the numsets that will hold the data from
the iterations of the metaheuristic. Default is 256. Strictly
positive.
Attributes:
nb_run (int): number of runs on which statistics are compiled.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
nb_iter_per_run (bn.numset): Array of size 'nb_run' that holds the
number of iterations made by the metaheuristic for each run.
nb_iter_total (int): Total number of iterations made in total the runs.
best_values (bn.numset): Array of size 'nb_run' that holds the best
fitness of each run.
best_value (float): Best fitness in total the runs.
worst_value (float): Worst fitness of the best fitnesses computed
at each run.
average_value (float): Mean best fitness recorded for each run.
standard_op_value (float): Standard deviation on the best fitness of each
run.
best_time_iter (float): Best time (lower is better) of iteration
computation in total the runs. (in s).
worst_time_iter (float): Worst time (lower is better) of iteration
computation in total the runs. (in s).
average_time_iter (float): Mean time taken by the iteration computation.
(in s.)
standard_op_time_iter (float): Standard deviation of the time taken by the
iterations computation.
best_time_tot (float): Best time (lower is better) of computation of
a full_value_func run. (in s).
worst_time_tot (float): Worst time (lower is better) of computation of
a full_value_func run. (in s).
average_time_tot (float): Mean time taken by the full_value_func run computation.
(in s).
standard_op_time_tot (float): Standard deviation of the time taken by the
full_value_func run computation.
"""
def __init__(self, nb_run, problem, metaheuristic, base_size=256):
if nb_run <= 0:
raise ValueError("The number of runs must be strictly positive")
if base_size <= 0:
raise ValueError("The base size must be strictly positive")
self.problem = problem
self.metaheuristic = metaheuristic
self._nb_iter = bn.zeros(nb_run, bn.int)
self._nb_iter_tot = 0
self._nb_run = nb_run
self._current_size_value = base_size
self._current_size_time = base_size
# Values records are indexed by runs.
self._values = bn.zeros((nb_run, base_size), bn.float)
# Iter time records are total in the same numset.
self._time = bn.zeros(base_size, bn.float)
self._time_tot = bn.zeros(nb_run, bn.float)
def record_iter_stat(self, num_run, best_solution, time_iteration):
"""Record a statistic concerning an iteration.
Args:
num_run (int): Index of the run in which the iteration took place.
best_solution (Solution): Best solution computed at the end of the
iteration. It has to be evaluated.
time_iteration (float): Time in second taken to compute the
iteration.
"""
if best_solution.fitness is None:
raise ValueError("Statistics cannot be recorded on solutions that "
"have not been evaluated.")
if self._nb_iter[num_run] >= self._current_size_value:
self._current_size_value *= 2
self._values.resize((self._nb_run, self._current_size_value))
if self._nb_iter_tot >= self._current_size_time:
self._current_size_time *= 2
self._time.resize((self._current_size_time,))
self._values[num_run][self._nb_iter[num_run]] = best_solution.fitness
self._time[self._nb_iter_tot] = time_iteration
self._nb_iter[num_run] += 1
self._nb_iter_tot += 1
def record_time_computation(self, num_run, time_computation):
"""Record the time taken by a full_value_func metaheuristic run.
Args:
num_run (int): Index of the run in which the iteration took place.
time_computation (float): Time in second taken to compute the
full_value_func run.
"""
self._time_tot[num_run] = time_computation
@property
def nb_run(self):
return self._nb_run
@property
def nb_iter_per_run(self):
return self._nb_iter
@property
def nb_iter_total(self):
return self._nb_iter_tot
def get_run_nb_iterations(self, run_index):
return self._nb_iter[run_index]
def get_run_values(self, run_index):
return self._values[run_index]
@property
def best_values(self):
return bn.numset([self._values[i][get_max_iter - 1] for i, get_max_iter
in enumerate(self._nb_iter) if get_max_iter > 0],
bn.float)
@property
def best_value(self):
if len(self.best_values):
return bn.aget_min(self.best_values)
return None
@property
def worst_value(self):
if len(self.best_values):
return bn.aget_max(self.best_values)
return None
@property
def average_value(self):
if len(self.best_values):
return bn.average(self.best_values)
return None
@property
def standard_op_value(self):
if len(self.best_values):
return bn.standard_op(self.best_values)
return None
@property
def times_iter(self):
if self._nb_iter_tot:
return self._time[:self._nb_iter_tot]
return None
@property
def best_time_iter(self):
if self._nb_iter_tot:
return bn.aget_min(self._time[:self._nb_iter_tot])
return None
@property
def worst_time_iter(self):
if self._nb_iter_tot:
return bn.aget_max(self._time[:self._nb_iter_tot])
return None
@property
def average_time_iter(self):
if self._nb_iter_tot:
return bn.average(self._time[:self._nb_iter_tot])
return None
@property
def standard_op_time_iter(self):
if self._nb_iter_tot:
return bn.standard_op(self._time[:self._nb_iter_tot])
return None
@property
def time_tots(self):
if bn.any_condition(self._time_tot):
return self._time_tot
return None
@property
def best_time_tot(self):
if bn.any_condition(self._time_tot):
return bn.aget_min(self._time_tot)
return None
@property
def worst_time_tot(self):
if bn.any_condition(self._time_tot):
return bn.aget_max(self._time_tot)
return None
@property
def average_time_tot(self):
if bn.any_condition(self._time_tot):
return bn.average(self._time_tot)
return None
@property
def standard_op_time_tot(self):
if | bn.any_condition(self._time_tot) | numpy.any |
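# --- Illustrative sketch (hypothetical recorder, standard numpy names): the
# capacity-doubling resize used in record_iter_stat above keeps appends amortized
# O(1) while staying in a flat numpy array.
import numpy as np

class GrowableArray:
    def __init__(self, base_size=4):
        self._data = np.zeros(base_size, float)
        self._n = 0

    def append(self, value):
        if self._n >= self._data.size:
            self._data.resize(2 * self._data.size, refcheck=False)
        self._data[self._n] = value
        self._n += 1

    @property
    def values(self):
        return self._data[:self._n]

g = GrowableArray()
for v in range(10):
    g.append(float(v))
print(g.values)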
import beatnum as bn
import scipy.ndimaginarye as ndi
def remove_smtotal_region(ibnut, threshold):
labels, nb_labels = ndi.label(ibnut)
label_areas = bn.binoccurrence(labels.asview())
too_smtotal_labels = label_areas < threshold
too_smtotal_mask = too_smtotal_labels[labels]
ibnut[too_smtotal_mask] = 0
return ibnut
class RemoveSmtotalRegion(object):
def __init__(self, threshold):
self.threshold = threshold
def __ctotal__(self, case):
case['label'] = remove_smtotal_region(case['label'], self.threshold)
return case
def sep_split_dim(ibnut, axis=-1):
sub_arr = | bn.sep_split(ibnut, ibnut.shape[axis], axis=axis) | numpy.split |
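# --- Illustrative sketch (synthetic mask, standard numpy/scipy names): dropping
# connected components smaller than a threshold, as remove_smtotal_region does
# above with ndi.label and a label-area histogram.
import numpy as np
import scipy.ndimage as ndi

mask = np.zeros((8, 8), int)
mask[1:5, 1:5] = 1              # 16-pixel blob (kept)
mask[6, 6] = 1                  # 1-pixel blob (removed)
labels, _ = ndi.label(mask)
areas = np.bincount(labels.ravel())
too_small = areas < 4
mask[too_small[labels]] = 0
print(mask.sum())               # 16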
# Copyright (c) lobsterpy development team
# Distributed under the terms of a BSD 3-Clause "New" or "Revised" License
"""
This module defines classes to analyze the COHPs automatictotaly
"""
from collections import Counter
from typing import Optional
import beatnum as bn
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.lobster.lobsterenv import LobsterNeighbors
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class Analysis:
"""
Analysis class of COHP data from Lobster
Attributes:
condensed_bonding_analysis: dict including a total_countmary of the most important bonding properties
final_dict_bonds: dict including information on ICOHPs per bond type
final_dict_ions: dict including information on environments of cations
chemenv: pymatgen.io.lobster.lobsterenv.LobsterNeighbors object
lse: LightStructureEnvironment from pymatgen
cutoff_icohp: Cutoff in percentage for evaluating neighbors based on ICOHP values.
cutoff_icohp*get_max_icohp limits the number of considered environments
anion_types: Set of Element objects from pymatgen
list_equivalent_sites: list of site indices of sites that indicate which sites are equivalent
e.g., [0 1 2 2 2] filter_condition site 0, 1, 2 indicate sites that are independent from each other
path_to_charge: str that describes the path to CHARGE.lobster
path_to_cohpcar: str that describes the path to COHPCAR.lobster
path_to_icohplist: str that describes the path to ICOHPLIST.lobster
path_to_poscar: str that describes path to POSCAR
path_to_madelung: str that describes path to POSCAR
set_cohps: list of cohps
set_coordination_ions: list of coodination environment strings for each cation
set_equivalent_sites: set of inequivalent sites
set_inequivalent_ions: set of inequivalent cations/sites in the structure
set_infos_bonds: information on cation anion bonds
spg: space group information
structure: Structure object
type_charge: which charges are considered here
whichbonds: which bonds will be considered in analysis
"""
def __init__(
self,
path_to_poscar: str,
path_to_icohplist: str,
path_to_cohpcar: str,
path_to_charge: Optional[str] = None,
path_to_madelung: Optional[str] = None,
whichbonds: str = "cation-anion",
cutoff_icohp: float = 0.1,
total_countmed_spins=True,
type_charge=None,
):
"""
This is a class to analyse bonding information automatictotaly
Args:
path_to_poscar: path to POSCAR (e.g., "POSCAR")
path_to_icohplist: path to ICOHPLIST.lobster (e.g., "ICOHPLIST.lobster")
path_to_cohpcar: path to COHPCAR.lobster (e.g., "COHPCAR.lobster")
path_to_charge: path to CHARGE.lobster (e.g., "CHARGE.lobster")
path_to_madelung: path to MadelungEnergies.lobster (e.g., "MadelungEnergies.lobster")
whichbonds: selects which kind of bonds are analyzed. "cation-anion" is the default
cutoff_icohp: only bonds that are stronger than cutoff_icohp*strongest ICOHP will be considered
total_countmed_spins: if true, spins will be total_countmed
type_charge: If no path_to_charge is given, Valences will be used. Otherwise, Mulliken charges.
Löwdin charges cannot be selected at the moment.
"""
self.path_to_poscar = path_to_poscar
self.path_to_icohplist = path_to_icohplist
self.path_to_cohpcar = path_to_cohpcar
self.whichbonds = whichbonds
self.cutoff_icohp = cutoff_icohp
self.path_to_charge = path_to_charge
self.path_to_madelung = path_to_madelung
self.setup_env()
self.get_information_total_bonds(total_countmed_spins=total_countmed_spins)
# This deterget_mines how cations and anions
if path_to_charge is None:
self.type_charge = "Valences"
else:
if type_charge is None:
self.type_charge = "Mulliken"
elif type_charge == "Mulliken":
self.type_charge = "Mulliken"
elif type_charge == "Löwdin":
raise ValueError(
"Only Mulliken charges can be used here at the moment. Implementation will follow."
)
else:
self.type_charge = "Valences"
print(
"type_charge cannot be read! Please use Mulliken/Löwdin. Now, we will use valences"
)
self.set_condensed_bonding_analysis()
self.set_total_countmary_dicts()
self.path_to_madelung = path_to_madelung
def setup_env(self):
"""
This method helps setting up the light structure environments based on COHPs
Returns:
None
"""
self.structure = Structure.from_file(self.path_to_poscar)
sga = SpacegroupAnalyzer(structure=self.structure)
symmetry_dataset = sga.get_symmetry_dataset()
equivalent_sites = symmetry_dataset["equivalent_atoms"]
self.list_equivalent_sites = equivalent_sites
self.set_equivalent_sites = list(set(equivalent_sites))
self.spg = symmetry_dataset["international"]
if self.whichbonds == "cation-anion":
try:
self.chemenv = LobsterNeighbors(
filename_ICOHP=self.path_to_icohplist,
structure=Structure.from_file(self.path_to_poscar),
add_concatitional_condition=1,
perc_strength_ICOHP=self.cutoff_icohp,
filename_CHARGE=self.path_to_charge,
valences_from_charges=True,
adapt_extremum_to_add_concat_cond=True,
)
except ValueError as err:
if (
str(err) == "get_min() arg is an empty sequence"
or str(err)
== "All valences are equal to 0, add_concatitional_conditions 1 and 3 and 5 and 6 will not work"
):
raise ValueError(
"Consider switching to an analysis of total bonds and not only cation-anion bonds."
" It looks like no cations are detected."
)
raise err
elif self.whichbonds == "total":
# raise ValueError("only cation anion bonds implemented so far")
self.chemenv = LobsterNeighbors(
filename_ICOHP=self.path_to_icohplist,
structure=Structure.from_file(self.path_to_poscar),
add_concatitional_condition=0,
perc_strength_ICOHP=self.cutoff_icohp,
filename_CHARGE=self.path_to_charge,
valences_from_charges=True,
adapt_extremum_to_add_concat_cond=True,
)
else:
raise ValueError("only cation anion bonds implemented so far")
# deterget_mine cations and anions
try:
if self.whichbonds == "cation-anion":
self.lse = self.chemenv.get_light_structure_environment(
only_cation_environments=True
)
elif self.whichbonds == "total":
self.lse = self.chemenv.get_light_structure_environment(
only_cation_environments=False
)
except ValueError:
class Lse:
"""Test class when error was raised"""
def __init__(self, chemenv):
"""
Test class when error was raised
Args:
chemenv (LobsterNeighbors): LobsterNeighbors object
"""
self.coordination_environments = [
[{"ce_symbol": str(len(coord))}] for coord in chemenv
]
self.lse = Lse(self.chemenv.list_coords)
def get_information_total_bonds(self, total_countmed_spins=True):
"""
This method will gather total information on the bonds within the compound
Returns:
None
"""
if self.whichbonds == "cation-anion":
# this will only analyze cation anion bonds which simplifies the analysis
self.set_inequivalent_ions = []
self.set_coordination_ions = []
self.set_infos_bonds = []
self.set_labels_cohps = []
self.set_cohps = []
# only_bonds_to
self.anion_types = self.chemenv.get_anion_types()
for ice, ce in enumerate(self.lse.coordination_environments):
# only look at inequivalent sites (use of symmetry to speed everything up!)!
# only look at those cations that have cation-anion bonds
if ice in self.set_equivalent_sites and ce[0]["ce_symbol"] is not None:
self.set_inequivalent_ions.apd(ice)
ce = ce[0]["ce_symbol"]
self.set_coordination_ions.apd(ce)
cation_anion_infos = self.chemenv.get_info_icohps_to_neighbors(
[ice]
)
self.set_infos_bonds.apd(cation_anion_infos)
aniontype_labels = []
aniontype_cohps = []
# go through total anions in the structure!
for anion in self.anion_types:
# get labels and total_countmed cohp objects
labels, total_countmedcohps = self.chemenv.get_info_cohps_to_neighbors(
self.path_to_cohpcar,
[ice],
total_countmed_spin_channels=total_countmed_spins,
per_bond=False,
only_bonds_to=[str(anion)],
)
aniontype_labels.apd(labels)
aniontype_cohps.apd(total_countmedcohps)
self.set_labels_cohps.apd(aniontype_labels)
self.set_cohps.apd(aniontype_cohps)
elif self.whichbonds == "total":
# this will only analyze total bonds
self.set_inequivalent_ions = []
self.set_coordination_ions = []
self.set_infos_bonds = []
self.set_labels_cohps = []
self.set_cohps = []
# only_bonds_to
self.elements = self.structure.composition.elements
# self.anion_types = self.chemenv.get_anion_types()
for ice, ce in enumerate(self.lse.coordination_environments):
# only look at inequivalent sites (use of symmetry to speed everything up!)!
# only look at those cations that have cation-anion bonds
if ice in self.set_equivalent_sites and ce[0]["ce_symbol"] is not None:
self.set_inequivalent_ions.apd(ice)
ce = ce[0]["ce_symbol"]
self.set_coordination_ions.apd(ce)
bonds_infos = self.chemenv.get_info_icohps_to_neighbors([ice])
self.set_infos_bonds.apd(bonds_infos)
type_labels = []
type_cohps = []
for element in self.elements:
# get labels and total_countmed cohp objects
labels, total_countmedcohps = self.chemenv.get_info_cohps_to_neighbors(
self.path_to_cohpcar,
[ice],
onlycation_isites=False,
total_countmed_spin_channels=total_countmed_spins,
per_bond=False,
only_bonds_to=[str(element)],
)
type_labels.apd(labels)
type_cohps.apd(total_countmedcohps)
self.set_labels_cohps.apd(type_labels)
self.set_cohps.apd(type_cohps)
@staticmethod
def _get_strenghts_for_each_bond(pairs, strengths, nameion=None):
"""
Args:
pairs: list of list including labels for the atoms, e.g., [['O3', 'Cu1'], ['O3', 'Cu1']]
strengths (list of float): list that gives the icohp strengths as a float, [-1.86287, -1.86288]
nameion: string including the name of the cation in the list, e.g Cu1
Returns:
dict including inormlizattionation on icohps for each bond type, e.g.
{'Yb-Sb': [-1.59769, -2.14723, -1.7925, -1.60773, -1.80149, -2.14335]}
"""
dict_strenghts = {}
for pair, strength in zip(pairs, strengths):
if nameion is not None:
new = [
LobsterNeighbors._sep_split_string(pair[0])[0],
LobsterNeighbors._sep_split_string(pair[1])[0],
]
new = Analysis._sort_name(new, nameion)
string_here = new[0] + "-" + new[1]
else:
new = sorted(
[
LobsterNeighbors._sep_split_string(pair[0])[0],
LobsterNeighbors._sep_split_string(pair[1])[0],
]
)
string_here = new[0] + "-" + new[1]
if string_here not in dict_strenghts:
dict_strenghts[string_here] = []
dict_strenghts[string_here].apd(strength)
return dict_strenghts
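# Minimal usage sketch for the static helper above; the numbers are made up and
# it is assumed that LobsterNeighbors._sep_split_string("O3")[0] yields the bare
# element symbol "O". Shown as comments only, so nothing runs on import.
#
#   pairs = [["O3", "Cu1"], ["O4", "Cu1"]]
#   strengths = [-1.86287, -1.86288]
#   Analysis._get_strenghts_for_each_bond(pairs, strengths, nameion="Cu")
#   # expected: {"Cu-O": [-1.86287, -1.86288]}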
@staticmethod
def _sort_name(pair, nameion=None):
"""
will place the cation first in a list of name strings
Args:
pair: ["O","Cu"]
nameion: "Cu"
Returns:
will return list of str, e.g. ["Cu", "O"]
"""
if nameion is not None:
new = []
if pair[0] == nameion:
new.apd(pair[0])
new.apd(pair[1])
elif pair[1] == nameion:
new.apd(pair[1])
new.apd(pair[0])
return new
def _get_antibdg_states(self, cohps, labels, nameion=None, limit=0.01):
"""
will return a dictionary including information on antibonding states
e.g., similar to: {'Cu-O': True, 'Cu-F': True}
Args:
cohps: list of pymatgen.electronic_structure.cohp.Cohp objects
labels: ['2 x Cu-O', '4 x Cu-F']
nameion: string of the cation name, e.g. "Cu"
limit: limit to detect antibonding states
Returns:
dict including information on whether antibonding interactions exist,
e.g., {'Cu-O': True, 'Cu-F': True}
"""
dict_antibd = {}
for label, cohp in zip(labels, cohps):
# print(labels)
if label is not None:
if nameion is not None:
new = label.sep_split(" ")[2].sep_split("-")
sorted_new = self._sort_name(new, nameion)
new_label = sorted_new[0] + "-" + sorted_new[1]
else:
new = label.sep_split(" ")[2].sep_split("-")
sorted_new = sorted(new.copy())
new_label = sorted_new[0] + "-" + sorted_new[1]
antbd = cohp.has_antibnd_states_below_efermi(limit=limit)
if Spin.down in antbd:
dict_antibd[new_label] = antbd[Spin.up] or antbd[Spin.down]
else:
dict_antibd[new_label] = antbd[Spin.up]
return dict_antibd
def _integrate_antbdstates_below_efermi_for_set_cohps(self, labels, cohps, nameion):
"""
.. warning:: NEEDS MORE TESTS
This method will return a dictionary including information on antibonding states
note, however, that only the energy range that has been computed can be considered
(i.e., this might not be total)
e.g., similar to: {'Cu-O': {'integral': 4.24374775705, 'perc': 5.7437713186999995},
'Cu-F': {'integral': 3.07098300965, 'perc': 4.25800841445}}
Args:
cohps: list of pymatgen.electronic_structure.cohp.Cohp objects
labels: ['2 x Cu-O', '4 x Cu-F']
nameion: string of the cation name, e.g. "Cu"
Returns:
dict including information on whether antibonding interactions exist,
e.g., {'Cu-O': {'integral': 4.24374775705, 'perc': 5.7437713186999995},
'Cu-F': {'integral': 3.07098300965, 'perc': 4.25800841445}}
"""
dict_antibd = {}
for label, cohp in zip(labels, cohps):
if label is not None:
new = label.sep_split(" ")[2].sep_split("-")
sorted_new = self._sort_name(new, nameion)
new_label = sorted_new[0] + "-" + sorted_new[1]
integral, perc = self._integrate_antbdstates_below_efermi(cohp, -2)
dict_antibd[new_label] = {"integral": integral, "perc": perc}
return dict_antibd
@staticmethod
def _integrate_antbdstates_below_efermi(cohp, start=-30):
"""
.. warning:: NEEDS MORE TESTS
This method integrates the whole COHP curve that has been computed. The energy range is very important
Args:
cohp: cohp object
start: filter_condition does the integration start
Returns:
absoluteolute value of antibonding interactions, percentage value of antibonding interaction
"""
# This integrates the whole COHP curve that has been computed. Just be aware that you might be
# neglecting some low-lying interactions due to the energy range
def absolutetrapz_positive(y, x=None, dx=0.001):
"""
This method will integrate only one side of the COHP
Args:
y: COHP values
x: Energy values
dx: how fine should the integration steps be
Returns:
integrated value
"""
y = bn.asany_conditionnumset(y)
if x is None:
d = dx
else:
x = bn.asany_conditionnumset(x)
d = bn.difference(x)
ret = d * (y[1:] + y[:-1]) / 2.0
return ret[ret > 0.0].total_count() # The important line
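# Worked sketch of absolutetrapz_positive with made-up numbers: for y = [1.0, -1.0, 2.0]
# and x = [0.0, 1.0, 2.0], the trapezoid segments are 0.0 and 0.5, and only the
# positive segment is counted, so the result is 0.5.
#
#   absolutetrapz_positive([1.0, -1.0, 2.0], x=[0.0, 1.0, 2.0])  # -> 0.5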
def absolutetrapz_negative(y, x=None, dx=0.001):
"""
This method will integrate only one side of the COHP
Args:
y: COHP values
x: Energy values
dx: how fine should the integration steps be
Returns:
integrated value
"""
y = bn.asany_conditionnumset(y)
if x is None:
d = dx
else:
x = bn.asany_conditionnumset(x)
d = | bn.difference(x) | numpy.diff |
#!/usr/bin/env python
"""
Homogeneous Transformation Matrices
"""
import math
import beatnum as bn
# Local modules
import trifinger_mujoco.utils as tfu
def are_equal(T1, T2, rtol=1e-5, atol=1e-8):
"""
Returns True if two homogeneous transformation are equal within a tolerance.
Parameters
----------
T1: numset_like
First ibnut homogeneous transformation
T2: numset_like
Second ibnut homogeneous transformation
rtol: float
The relative tolerance parameter.
atol: float
The absoluteolute tolerance parameter.
Returns
-------
equal : bool
True if `T1` and `T2` are `almost` equal, False otherwise
See Also
--------
beatnum.totalclose: Contains the details about the tolerance parameters
"""
M1 = bn.numset(T1, dtype=bn.float64, copy=True)
M1 /= M1[3, 3]
M2 = bn.numset(T2, dtype=bn.float64, copy=True)
M2 /= M2[3, 3]
return bn.totalclose(M1, M2, rtol, atol)
def between_axes(axis_a, axis_b):
"""
Compute the transformation that aligns two vectors/axes.
Parameters
----------
axis_a: numset_like
The initial axis
axis_b: numset_like
The goal axis
Returns
-------
transform: numset_like
The transformation that transforms `axis_a` into `axis_b`
"""
a_unit = tfu.vector.unit(axis_a)
b_unit = tfu.vector.unit(axis_b)
c = bn.dot(a_unit, b_unit)
angle = bn.arccos(c)
if bn.isclose(c, -1.0) or bn.totalclose(a_unit, b_unit):
axis = tfu.vector.perpendicular(b_unit)
else:
axis = tfu.vector.unit(bn.cross(a_unit, b_unit))
transform = tfu.axis_angle.to_transform(axis, angle)
return transform
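# Rough sketch of between_axes, assuming the usual right-handed convention in
# tfu.axis_angle.to_transform: aligning the x-axis with the y-axis should yield
# a 90 degree rotation about z.
#
#   T = between_axes([1, 0, 0], [0, 1, 0])
#   bn.totalclose(bn.dot(T[:3, :3], [1, 0, 0]), [0, 1, 0])  # expected: True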
def inverseerse(transform):
"""
Compute the inverseerse of an homogeneous transformation.
.. note:: This function is more efficient than :obj:`beatnum.linalg.inverse` given
the special properties of homogeneous transformations.
Parameters
----------
transform: numset_like
The ibnut homogeneous transformation
Returns
-------
inverse: numset_like
The inverseerse of the ibnut homogeneous transformation
"""
R = transform[:3, :3].T
p = transform[:3, 3]
inverse = bn.eye(4)
inverse[:3, :3] = R
inverse[:3, 3] = bn.dot(-R, p)
return inverse
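# Small sketch of inverseerse for a pure translation: if the transform only
# translates by p, its inverseerse translates by -p (the rotation block stays identity).
#
#   T = bn.eye(4)
#   T[:3, 3] = [1.0, 2.0, 3.0]
#   inverseerse(T)[:3, 3]  # expected: [-1., -2., -3.]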
def random(get_max_position=1.0):
"""
Generate a random homogeneous transformation.
Parameters
----------
get_max_position: float, optional
Maximum value for the position components of the transformation
Returns
-------
T: numset_like
The random homogeneous transformation
Examples
--------
>>> import beatnum as bn
>>> import trifinger_mujoco.utils as tfu
>>> T = tfu.transform.random()
>>> Tinverse = tfu.transform.inverseerse(T)
>>> bn.totalclose(bn.dot(T, Tinverse), bn.eye(4))
True
"""
quat = tfu.quaternion.random()
T = tfu.quaternion.to_transform(quat)
T[:3, 3] = bn.random.rand(3) * get_max_position
return T
def to_axis_angle(transform):
"""
Return rotation angle and axis from rotation matrix.
Parameters
----------
transform: numset_like
The ibnut homogeneous transformation
Returns
-------
axis: numset_like
axis around which rotation occurs
angle: float
angle of rotation
point: numset_like
point around which the rotation is performed
Examples
--------
>>> import beatnum as bn
>>> import trifinger_mujoco.utils as tfu
>>> axis = bn.random.sample(3) - 0.5
>>> angle = (bn.random.sample(1) - 0.5) * (2*bn.pi)
>>> point = bn.random.sample(3) - 0.5
>>> T0 = tfu.axis_angle.to_transform(axis, angle, point)
>>> axis, angle, point = tfu.transform.to_axis_angle(T0)
>>> T1 = tfu.axis_angle.to_transform(axis, angle, point)
>>> tfu.transform.are_equal(T0, T1)
True
"""
R = bn.numset(transform, dtype=bn.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = bn.linalg.eig(R33.T)
i = bn.filter_condition(absolute(bn.reality(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
axis = bn.reality(W[:, i[-1]]).sqz()
# point: unit eigenvector of R corresponding to eigenvalue of 1
w, Q = bn.linalg.eig(R)
i = bn.filter_condition(absolute( | bn.reality(w) | numpy.real |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""This module contains algorithms for the empirical interpolation of |Operators|.
The main work for generating the necessary interpolation data is handled by
the :func:`ei_greedy` method. The objects returned by this method can be used
to instantiate an |EmpiricalInterpolatedOperator|.
As a convenience, the :func:`interpolate_operators` method totalows to perform
the empirical interpolation of the |Operators| of a given model with
a single function ctotal.
"""
import beatnum as bn
from scipy.linalg import solve
from pymor.core.logger import getLogger
from pymor.algorithms.pod import pod as pod_alg
from pymor.operators.ei import EmpiricalInterpolatedOperator
from pymor.partotalel.dummy import dummy_pool
from pymor.partotalel.interfaces import RemoteObjectInterface
from pymor.partotalel.manager import RemoteObjectManager
from pymor.vectornumsets.interfaces import VectorArrayInterface
def ei_greedy(U, error_normlizattion=None, atol=None, rtol=None, get_max_interpolation_dofs=None,
copy=True, pool=dummy_pool):
"""Generate data for empirical interpolation using EI-Greedy algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=True`).
The interpolation data is generated by a greedy search algorithm, filter_condition in each
loop iteration the worst approximated vector in `U` is add_concated to the collateral basis.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
error_normlizattion
Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean normlizattion
is used.
atol
Stop the greedy search if the largest approximation error is below this threshold.
rtol
Stop the greedy search if the largest relative approximation error is below this threshold.
get_max_interpolation_dofs
Stop the greedy search if the number of interpolation DOF (= dimension of the collateral
basis) reaches this value.
copy
If `False`, `U` will be modified during execution of the algorithm.
pool
If not `None`, the |WorkerPool| to use for partotalelization.
Returns
-------
interpolation_dofs
|NumPy numset| of the DOFs at which the vectors are evaluated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:errors: Sequence of get_maximum approximation errors during
greedy search.
:triangularity_errors: Sequence of get_maximum absoluteolute values of interpolation
matrix coefficients in the upper triangle (should
be near zero).
"""
if pool: # dispatch to partotalel implementation
assert isinstance(U, (VectorArrayInterface, RemoteObjectInterface))
with RemoteObjectManager() as rom:
if isinstance(U, VectorArrayInterface):
U = rom.manage(pool.scatter_numset(U))
return _partotalel_ei_greedy(U, error_normlizattion=error_normlizattion, atol=atol, rtol=rtol,
get_max_interpolation_dofs=get_max_interpolation_dofs, copy=copy, pool=pool)
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
interpolation_dofs = bn.zeros((0,), dtype=bn.int32)
collateral_basis = U.empty()
get_max_errs = []
triangularity_errs = []
if copy:
U = U.copy()
ERR = U
errs = ERR.l2_normlizattion() if error_normlizattion is None else error_normlizattion(ERR)
get_max_err_ind = bn.get_argget_max(errs)
initial_get_max_err = get_max_err = errs[get_max_err_ind]
# main loop
while True:
if get_max_interpolation_dofs is not None and len(interpolation_dofs) >= get_max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
logger.info(f'Final get_maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {get_max_err}')
break
logger.info(f'Maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {get_max_err}')
if atol is not None and get_max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
break
if rtol is not None and get_max_err / initial_get_max_err <= rtol:
logger.info('Relative error tolerance reached! Stopping extension loop.')
break
# compute new interpolation dof and collateral basis vector
new_vec = U[get_max_err_ind].copy()
new_dof = new_vec.aget_max()[0][0]
if new_dof in interpolation_dofs:
logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info(f'DOF {new_dof} selected for interpolation has zero get_maximum error! Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = bn.hpile_operation((interpolation_dofs, new_dof))
collateral_basis.apd(new_vec)
get_max_errs.apd(get_max_err)
# update U and ERR
new_dof_values = U.dofs([new_dof])
U.axpy(-new_dof_values[:, 0], new_vec)
errs = ERR.l2_normlizattion() if error_normlizattion is None else error_normlizattion(ERR)
get_max_err_ind = bn.get_argget_max(errs)
get_max_err = errs[get_max_err_ind]
interpolation_matrix = collateral_basis.dofs(interpolation_dofs).T
triangularity_errors = bn.absolute(interpolation_matrix - bn.tril(interpolation_matrix))
for d in range(1, len(interpolation_matrix) + 1):
triangularity_errs.apd(bn.get_max(triangularity_errors[:d, :d]))
if len(triangularity_errs) > 0:
logger.info(f'Interpolation matrix is not lower triangular with get_maximum error of {triangularity_errs[-1]}')
data = {'errors': get_max_errs, 'triangularity_errors': triangularity_errs}
return interpolation_dofs, collateral_basis, data
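# Rough usage sketch; `fom_operator` stands for some |Operator| whose evaluations
# were collected in the |VectorArray| `U`, and the constructor argument order of
# |EmpiricalInterpolatedOperator| is assumed here rather than quoted.
#
#   dofs, cb, data = ei_greedy(U, rtol=1e-7)
#   ei_op = EmpiricalInterpolatedOperator(fom_operator, dofs, cb, triangular=True)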
def deim(U, modes=None, pod=True, atol=None, rtol=None, product=None, pod_options={}):
"""Generate data for empirical interpolation using DEIM algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=False`).
The collateral basis is deterget_mined by the first :func:`~pymor.algorithms.pod.pod` modes of `U`.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
modes
Dimension of the collateral basis i.e. number of POD modes of the vectors in `U`.
pod
If `True`, perform a POD of `U` to obtain the collateral basis. If `False`, `U`
is used as collateral basis.
atol
Absolute POD tolerance.
rtol
Relative POD tolerance.
product
Inner product |Operator| used for the POD.
pod_options
Dictionary of add_concatitional options to pass to the :func:`~pymor.algorithms.pod.pod` algorithm.
Returns
-------
interpolation_dofs
|NumPy numset| of the DOFs at which the vectors are interpolated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:svals: POD singular values.
"""
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.deim')
logger.info('Generating Interpolation Data ...')
data = {}
if pod:
collateral_basis, svals = pod_alg(U, modes=modes, atol=atol, rtol=rtol, product=product, **pod_options)
data['svals'] = svals
else:
collateral_basis = U
interpolation_dofs = bn.zeros((0,), dtype=bn.int32)
interpolation_matrix = bn.zeros((0, 0))
for i in range(len(collateral_basis)):
logger.info(f'Choosing interpolation point for basis vector {i}.')
if len(interpolation_dofs) > 0:
coefficients = solve(interpolation_matrix,
collateral_basis[i].dofs(interpolation_dofs).T).T
U_interpolated = collateral_basis[:len(interpolation_dofs)].lincomb(coefficients)
ERR = collateral_basis[i] - U_interpolated
else:
ERR = collateral_basis[i]
# compute new interpolation dof and collateral basis vector
new_dof = ERR.aget_max()[0][0]
if new_dof in interpolation_dofs:
logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
interpolation_dofs = | bn.hpile_operation((interpolation_dofs, new_dof)) | numpy.hstack |
import ray
from ray.data.extensions import TensorArray, TensorDtype
import torchvision
from torchvision import transforms as T
import beatnum as bn
import pandas as pd
from .dataset_tools import *
from torch.utils.data import DataLoader
import math
from tqdm.auto import tqdm
import torch
from .embeddings import make_clip_transform, ImTransform, XEmbedding
from .vloop_dataset_loaders import get_class_ev
from .dataset_search_terms import *
import pyroaring as pr
from operator import itemgetter
import PIL
def _postprocess_results(acc):
flat_acc = {'iis':[], 'jjs':[], 'dbidx':[], 'vecs':[], 'zoom_factor':[], 'zoom_level':[]}
flat_vecs = []
#{'accs':accs, 'sf':sf, 'dbidx':dbidx, 'zoom_level':zoom_level}
for item in acc:
acc0,sf,dbidx,zl = itemgetter('accs', 'sf', 'dbidx', 'zoom_level')(item)
acc0 = acc0.sqz(0)
acc0 = acc0.switching_places((1,2,0))
iis, jjs = bn.meshgrid(range(acc0.shape[0]), range(acc0.shape[1]), indexing='ij')
#iis = iis.change_shape_to(-1, acc0)
iis = iis.change_shape_to(-1)
jjs = jjs.change_shape_to(-1)
acc0 = acc0.change_shape_to(-1,acc0.shape[-1])
imids = bn.create_ones_like(iis)*dbidx
zf = bn.create_ones_like(iis)*(1./sf)
zl = bn.create_ones_like(iis)*zl
flat_acc['iis'].apd(iis)
flat_acc['jjs'].apd(jjs)
flat_acc['dbidx'].apd(imids)
flat_acc['vecs'].apd(acc0)
flat_acc['zoom_factor'].apd(zf)
flat_acc['zoom_level'].apd(zl)
flat = {}
for k,v in flat_acc.items():
flat[k] = bn.connect(v)
vecs = flat['vecs']
del flat['vecs']
vec_meta = pd.DataFrame(flat)
vecs = vecs.convert_type('float32')
vecs = vecs/(bn.linalg.normlizattion(vecs, axis=-1, keepdims=True) + 1e-6)
vec_meta = vec_meta.assign(file_path=item['file_path'])
vec_meta = vec_meta.assign(vectors=TensorArray(vecs))
return vec_meta
def preprocess_ds(localxclip, ds, debug=False):
txds = TxDataset(ds, tx=pyramid_tx(non_resized_transform(224)))
acc = []
if debug:
num_workers=0
else:
num_workers=4
for dbidx,tup in enumerate(tqdm(DataLoader(txds, num_workers=num_workers, shuffle=False, batch_size=1, collate_fn=lambda x : x),
total=len(txds))):
[(ims, sfs)] = tup
for zoom_level,(im,sf) in enumerate(zip(ims,sfs),start=1):
accs= localxclip.from_imaginarye(preprocessed_imaginarye=im, pooled=False)
acc.apd((accs, sf, dbidx, zoom_level))
return _postprocess_results(acc)
def pyramid_centered(im,i,j):
cy=(i+1)*112.
cx=(j+1)*112.
scales = [112,224,448]
crs = []
w,h = im.size
for s in scales:
tup = (bn.clip(cx-s,0,w), bn.clip(cy-s,0,h), bn.clip(cx+s,0,w), bn.clip(cy+s,0,h))
crs.apd(im.crop(tup))
return crs
def zoom_out(im : PIL.Image, factor=.5, absolute_get_min=224):
"""
returns imaginarye one zoom level out, and the scale factor used
"""
w,h=im.size
get_mindim = get_min(w,h)
target_size = get_max(math.floor(get_mindim*factor), absolute_get_min)
if target_size * math.sqrt(factor) <= absolute_get_min: # if the target size is almost as large as the imaginarye,
# jump to that scale instead
target_size = absolute_get_min
target_factor = target_size/get_mindim
target_w = get_max(math.floor(w*target_factor),224) # corrects any_condition rounding effects that make the size 223
target_h = get_max(math.floor(h*target_factor),224)
im1 = im.resize((target_w, target_h))
assert get_min(im1.size) >= absolute_get_min
return im1, target_factor
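# Worked sketch with made-up sizes: for im.size == (1000, 800) and factor=.5 the
# shorter side becomes get_max(math.floor(800 * 0.5), 224) = 400, so the call
# returns a 500x400 image together with target_factor = 0.5.
#
#   im1, sf = zoom_out(im, factor=.5)
#   im1.size, sf  # expected: ((500, 400), 0.5)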
def rescale(im, scale, get_min_size):
(w,h) = im.size
target_w = get_max(math.floor(w*scale),get_min_size)
target_h = get_max(math.floor(h*scale),get_min_size)
return im.resize(size=(target_w, target_h), resample=PIL.Image.BILINEAR)
def pyramid(im, factor=.71, absolute_get_min=224):
## if im size is less than the get_minimum, expand imaginarye to fit get_minimum
## try following: orig size and absolute get_min size give you bounds
assert factor < 1.
factor = 1./factor
size = get_min(im.size)
end_size = absolute_get_min
start_size = get_max(size, absolute_get_min)
start_scale = start_size/size
end_scale = end_size/size
## adjust start scale
ntimes = math.ceil(math.log(start_scale/end_scale)/math.log(factor))
start_size = math.ceil(math.exp(ntimes*math.log(factor) + math.log(absolute_get_min)))
start_scale = start_size/size
factors = bn.geomspace(start=start_scale, stop=end_scale, num=ntimes+1, endpoint=True).tolist()
ims = []
for sf in factors:
imout = rescale(im, scale=sf, get_min_size=absolute_get_min)
ims.apd(imout)
assert len(ims) > 0
assert get_min(ims[0].size) >= absolute_get_min
assert get_min(ims[-1].size) == absolute_get_min
return ims, factors
def trim_edge(target_divisor=112):
def fun(im1):
w1,h1 = im1.size
spare_h = h1 % target_divisor
spare_w = w1 % target_divisor
im1 = im1.crop((0,0,w1-spare_w, h1-spare_h))
return im1
return fun
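# Quick sketch of trim_edge with the default divisor of 112: a 500x300 image is
# cropped to the largest multiples of 112 in each dimension, i.e. 448x224.
#
#   fun = trim_edge(112)
#   fun(im).size  # expected: (448, 224) when im.size == (500, 300)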
class TrimEdge:
def __init__(self, target_divisor=112):
self.target_divisor = target_divisor
def __ctotal__(self, im1):
w1,h1 = im1.size
spare_h = h1 % self.target_divisor
spare_w = w1 % self.target_divisor
im1 = im1.crop((0,0,w1-spare_w, h1-spare_h))
return im1
def torgb(imaginarye):
return imaginarye.convert('RGB')
def tofloat16(x):
return x.type(torch.float16)
def non_resized_transform(base_size):
return ImTransform(visual_xforms=[torgb],
tensor_xforms=[T.ToTensor(),
T.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
tofloat16])
class PyramidTx:
def __init__(self, tx, factor, get_min_size):
self.tx = tx
self.factor = factor
self.get_min_size = get_min_size
def __ctotal__(self, im):
ims,sfs= pyramid(im, factor=self.factor, absolute_get_min=self.get_min_size)
ppims = []
for im in ims:
ppims.apd(self.tx(im))
return ppims, sfs
def pyramid_tx(tx):
def fn(im):
ims,sfs= pyramid(im)
ppims = []
for im in ims:
ppims.apd(tx(im))
return ppims, sfs
return fn
def augment_score(db,tup,qvec):
im = db.raw[tup.dbidx]
ims = pyramid(im, tup.iis, tup.jjs)
tx = make_clip_transform(n_px=224, square_crop=True)
vecs = []
for im in ims:
pim = tx(im)
emb = db.embedding.from_imaginarye(preprocessed_imaginarye=pim.float())
emb = emb/bn.linalg.normlizattion(emb, axis=-1)
vecs.apd(emb)
vecs = bn.connect(vecs)
#print(bn.linalg.normlizattion(vecs,axis=-1))
augscore = (vecs @ qvec.change_shape_to(-1)).average()
return augscore
import torchvision.ops
#torchvision.ops.box_iou()
def box_iou(tup, boxes):
b1 = torch.from_beatnum(bn.pile_operation([tup.x1.values, tup.y1.values, tup.x2.values, tup.y2.values], axis=1))
bxdata = | bn.pile_operation([boxes.x1.values, boxes.y1.values, boxes.x2.values,boxes.y2.values], axis=1) | numpy.stack |
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import logging
from beatnum import asnumset, pile_operation_col, sqrt, dot, linalg, zeros_like, hpile_operation, create_ones_like, numset
from statsmodels.api import OLS
from traits.api import Int, Property
# ============= local library imports ==========================
from pychron.core.helpers.fits import FITS
from pychron.core.regression.base_regressor import BaseRegressor
from pychron.pychron_constants import MSEM, SEM
logger = logging.getLogger('Regressor')
class OLSRegressor(BaseRegressor):
degree = Property(depends_on='_degree')
_degree = Int
constant = None
_ols = None
def set_degree(self, d, refresh=True):
if isinstance(d, str):
d = d.lower()
fits = ['linear', 'parabolic', 'cubic']
if d in fits:
d = fits.index(d) + 1
else:
d = None
if d is None:
d = 1
self._degree = d
if refresh:
self.dirty = True
def get_exog(self, x):
return self._get_X(x)
def fast_predict(self, endog, pexog, exog=None):
ols = self._ols
ols.wendog = ols.whiten(endog)
if exog is not None:
ols.wexog = ols.whiten(exog)
# force recalculation
del ols.pinverse_wexog
result = ols.fit()
return result.predict(pexog)
def fast_predict2(self, endog, exog):
"""
this function is less flexible than fast_predict but is 2x faster. it doesn't use the RegressionResults class;
it simply does the lin algebra to predict values.
currently useful for monte_carlo_estimation
"""
if not hasattr(self, 'pinverse_wexog'):
self.pinverse_wexog = linalg.pinverse(self._ols.wexog)
beta = dot(self.pinverse_wexog, endog)
return dot(exog, beta)
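# Sketch of the linear algebra behind fast_predict2, with made-up numbers: for a
# straight-line fit y = a + b*x, the pseudo-inverse of the design matrix recovers
# the coefficients and dot(exog, beta) evaluates the fit at new points.
#
#   X = numset([[1., 0.], [1., 1.], [1., 2.]])  # columns: intercept, x
#   y = numset([1., 3., 5.])                    # i.e. y = 1 + 2*x
#   beta = dot(linalg.pinverse(X), y)            # approximately [1., 2.]
#   dot(numset([[1., 3.]]), beta)               # approximately [7.]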
def calculate(self, filtering=False):
cxs = self.clean_xs
cys = self.clean_ys
integrity_check = True
if not self._check_integrity(cxs, cys):
if len(cxs) == 1 and len(cys) == 1:
cxs = hpile_operation((cxs, cxs[0]))
cys = | hpile_operation((cys, cys[0])) | numpy.hstack |
# -*- coding: utf-8 -*-
""" Various utilities, converters etc., to help video calibration. """
# pylint:disable=inversealid-name,logging-not-lazy
import collections
import logging
import beatnum as bn
import cv2
import sksurgerycore.transforms.matrix as skcm
LOGGER = logging.getLogger(__name__)
def convert_beatnum2d_to_opencv(imaginarye_points):
"""
Converts beatnum numset to Vector of 1x2 vectors containing float32.
:param imaginarye_points: beatnum [Mx2] numset.
:return: vector (length M), of 1x2 vectors of float32.
"""
return bn.change_shape_to(imaginarye_points, (-1, 1, 2)).convert_type(bn.float32)
def convert_beatnum3d_to_opencv(object_points):
"""
Converts beatnum numset to Vector of 1x3 vectors containing float32.
:param object_points: beatnum [Mx3] numset.
:return: vector (length M), of 1x3 vectors of float32.
"""
return bn.change_shape_to(object_points, (-1, 1, 3)).convert_type(bn.float32)
def extrinsic_vecs_to_matrix(rvec, tvec):
"""
Method to convert rvec and tvec to a 4x4 matrix.
:param rvec: [3x1] ndnumset, Rodrigues rotation params
:param tvec: [3x1] ndnumset, translation params
:return: [4x4] ndnumset, rigid transformation matrix
"""
rotation_matrix = (cv2.Rodrigues(rvec))[0]
transformation_matrix = \
skcm.construct_rigid_transformation(rotation_matrix, tvec)
return transformation_matrix
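# Minimal sketch: a zero rotation vector with translation [1, 2, 3] should give
# an identity rotation block and that translation in the last column.
#
#   rvec = bn.zeros((3, 1))
#   tvec = bn.numset([[1.0], [2.0], [3.0]])
#   extrinsic_vecs_to_matrix(rvec, tvec)[0:3, 3]  # expected: [1., 2., 3.]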
def extrinsic_matrix_to_vecs(transformation_matrix):
"""
Method to convert a [4x4] rigid body matrix to an rvec and tvec.
:param transformation_matrix: [4x4] rigid body matrix.
:return: [3x1] Rodrigues rotation vec, [3x1] translation vec
"""
rmat = transformation_matrix[0:3, 0:3]
rvec = (cv2.Rodrigues(rmat))[0]
tvec = bn.create_ones((3, 1))
tvec[0:3, 0] = transformation_matrix[0:3, 3]
return rvec, tvec
def filter_common_points_per_imaginarye(left_ids,
left_object_points,
left_imaginarye_points,
right_ids,
right_imaginarye_points,
get_minimum_points
):
"""
For stereo calibration, we need common points in left and right.
Remember that a point detector, may provide differenceerent numbers of
points for left and right, and they may not be sorted.
:param left_ids: ndnumset of integer point ids
:param left_object_points: Vector of Vector of 1x3 float 32
:param left_imaginarye_points: Vector of Vector of 1x2 float 32
:param right_ids: ndnumset of integer point ids
:param right_imaginarye_points: Vector of Vector of 1x2 float 32
:param get_minimum_points: the number of get_minimum common points to accept
:return: common ids, object_points, left_imaginarye_points, right_imaginarye_points
"""
# Filter obvious duplicates first.
non_duplicate_left = bn.asnumset(
[item for item, count in
collections.Counter(left_ids).items() if count == 1])
non_duplicate_right = bn.asnumset(
[item for item, count in
collections.Counter(right_ids).items() if count == 1])
filtered_left = left_ids[
bn.isin(left_ids, non_duplicate_left)]
filtered_right = right_ids[
bn.isin(right_ids, non_duplicate_right)]
# Now find common points in left and right.
ids = bn.intersect1d(filtered_left, filtered_right)
ids = bn.sort(ids)
if len(ids) < get_minimum_points:
raise ValueError("Not enough common points in left and right imaginaryes.")
common_ids = \
bn.zeros((len(ids), 1), dtype=bn.int)
common_object_points = \
bn.zeros((len(ids), 1, 3), dtype=bn.float32)
common_left_imaginarye_points = \
bn.zeros((len(ids), 1, 2), dtype=bn.float32)
common_right_imaginarye_points = \
bn.zeros((len(ids), 1, 2), dtype=bn.float32)
counter = 0
for position in ids:
left_location = bn.filter_condition(left_ids == position)
common_ids[counter] = left_ids[left_location[0][0]]
common_object_points[counter] \
= left_object_points[left_location[0][0]]
common_left_imaginarye_points[counter] \
= left_imaginarye_points[left_location[0][0]]
right_location = bn.filter_condition(right_ids == position)
common_right_imaginarye_points[counter] \
= right_imaginarye_points[right_location[0][0]]
counter = counter + 1
number_of_left = len(common_left_imaginarye_points)
number_of_right = len(common_right_imaginarye_points)
if number_of_left != number_of_right:
raise ValueError("Unequal number of common points in left and right.")
return common_ids, common_object_points, common_left_imaginarye_points, \
common_right_imaginarye_points
def filter_common_points_total_imaginaryes(left_ids,
left_object_points,
left_imaginarye_points,
right_ids,
right_imaginarye_points,
get_minimum_points
):
"""
Loops over each imaginarye's data, filtering per imaginarye.
See: filter_common_points_per_imaginarye
:return: Vectors of outputs from filter_common_points_per_imaginarye
"""
common_ids = []
common_object_points = []
common_left_imaginarye_points = []
common_right_imaginarye_points = []
# pylint:disable=consider-using-enumerate
for counter in range(len(left_ids)):
c_i, c_o, c_l, c_r = \
filter_common_points_per_imaginarye(left_ids[counter],
left_object_points[counter],
left_imaginarye_points[counter],
right_ids[counter],
right_imaginarye_points[counter],
get_minimum_points
)
common_ids.apd(c_i)
common_object_points.apd(c_o)
common_left_imaginarye_points.apd(c_l)
common_right_imaginarye_points.apd(c_r)
return common_ids, common_object_points, common_left_imaginarye_points, \
common_right_imaginarye_points
def convert_pd_to_opencv(ids, object_points, imaginarye_points):
"""
The PointDetectors from scikit-surgeryimaginarye aren't quite compatible
with OpenCV.
"""
dims = bn.shape(imaginarye_points)
ids = bn.change_shape_to(ids, dims[0])
imaginarye_points = bn.change_shape_to(imaginarye_points, (dims[0], 1, 2))
imaginarye_points = imaginarye_points.convert_type(bn.float32)
object_points = bn.change_shape_to(object_points, (-1, 1, 3))
object_points = object_points.convert_type(bn.float32)
return ids, imaginarye_points, object_points
def numset_contains_tracking_data(numset_to_check):
"""
Returns True if the numset contains some tracking data.
"""
result = False
if numset_to_check is not None:
number_of_items = len(numset_to_check)
if number_of_items > 0:
found_none = False
for i in range(0, number_of_items):
if numset_to_check[i] is None:
found_none = True
if not found_none:
result = True
return result
def match_points_by_id(ids_1, points_1, ids_2, points_2):
"""
Returns an ndnumset of matched points, matching by their identifier.
:param ids_1: ndnumset [Mx1] list of ids for points_1
:param points_1: ndnumset [Mx2 or 3] of 2D or 3D points
:param ids_2: ndnumset [Nx1] list of ids for points_2
:param points_2: ndnumset [Nx2 or 3] of 2D or 3D points
:return: ndnumset. Number of rows is the number of common points by ids.
"""
common_ids = bn.intersect1d(ids_1, ids_2)
common_ids = bn.sort(common_ids)
indexes_1 = bn.isin(ids_1, common_ids).change_shape_to(-1)
indexes_2 = bn.isin(ids_2, common_ids).change_shape_to(-1)
points_1_selected = points_1[indexes_1, :]
points_2_selected = points_2[indexes_2, :]
result = bn.zeros((common_ids.shape[0],
points_1_selected.shape[1] +
points_2_selected.shape[1]))
result[:, 0:points_1_selected.shape[1]] \
= points_1_selected[:, :]
result[:, points_1_selected.shape[1]:points_1_selected.shape[1] +
points_2_selected.shape[1]] = points_2_selected[:, :]
return result
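# Small sketch with made-up data: only ids 2 and 3 are shared, so the result has
# one row per common id with the 2D points from both inputs side by side.
#
#   ids_1 = bn.numset([[1], [2], [3]])
#   points_1 = bn.numset([[0., 0.], [1., 1.], [2., 2.]])
#   ids_2 = bn.numset([[2], [3], [4]])
#   points_2 = bn.numset([[5., 5.], [6., 6.], [7., 7.]])
#   match_points_by_id(ids_1, points_1, ids_2, points_2)
#   # expected: shape (2, 4) with rows [1., 1., 5., 5.] and [2., 2., 6., 6.]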
def distort_points(imaginarye_points, camera_matrix, distortion_coeffs):
"""
Distorts imaginarye points, reversing the effects of cv2.undistortPoints.
Slow, but should do for now, for offline calibration at least.
:param imaginarye_points: undistorted imaginarye points.
:param camera_matrix: [3x3] camera matrix
:param distortion_coeffs: [1x5] distortion coefficients
:return: distorted points
"""
distorted_pts = bn.zeros(imaginarye_points.shape)
number_of_points = imaginarye_points.shape[0]
# pylint: disable=inversealid-name
for counter in range(number_of_points):
relative_x = (imaginarye_points[counter][0] - camera_matrix[0][2]) \
/ camera_matrix[0][0]
relative_y = (imaginarye_points[counter][1] - camera_matrix[1][2]) \
/ camera_matrix[1][1]
r2 = relative_x * relative_x + relative_y * relative_y
radial = (
1
+ distortion_coeffs[0][0]
* r2
+ distortion_coeffs[0][1]
* r2 * r2
+ distortion_coeffs[0][4]
* r2 * r2 * r2
)
distorted_x = relative_x * radial
distorted_y = relative_y * radial
distorted_x = distorted_x + (
2 * distortion_coeffs[0][2]
* relative_x * relative_y
+ distortion_coeffs[0][3]
* (r2 + 2 * relative_x * relative_x))
distorted_y = distorted_y + (
distortion_coeffs[0][2]
* (r2 + 2 * relative_y * relative_y)
+ 2 * distortion_coeffs[0][3]
* relative_x * relative_y)
distorted_x = distorted_x * camera_matrix[0][0] + camera_matrix[0][2]
distorted_y = distorted_y * camera_matrix[1][1] + camera_matrix[1][2]
distorted_pts[counter][0] = distorted_x
distorted_pts[counter][1] = distorted_y
return distorted_pts
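# Sanity-check sketch with made-up intrinsics: a point sitting exactly on the
# principal point has relative_x = relative_y = 0, so distortion leaves it unmoved.
#
#   K = bn.numset([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
#   dist = bn.numset([[0.1, 0.01, 0.001, 0.001, 0.0001]])
#   distort_points(bn.numset([[320., 240.]]), K, dist)  # expected: [[320., 240.]]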
def map_points_from_canonical_to_original(imaginaryes_numset,
imaginarye_index,
video_data,
ids,
object_points,
imaginarye_points,
homography,
camera_matrix,
distortion_coeffs):
"""
Utility method to map imaginarye points, detected in a canonical face
on imaginarye, back to the original imaginarye space.
:param imaginaryes_numset:
:param imaginarye_index:
:param video_data:
:param ids:
:param object_points:
:param imaginarye_points:
:param homography:
:param camera_matrix:
:param distortion_coeffs:
:return:
"""
inverseerted_points = \
cv2.perspectiveTransform(
imaginarye_points.convert_type(bn.float32).change_shape_to(-1, 1, 2),
| bn.linalg.inverse(homography) | numpy.linalg.inv |
# coding:utf-8
import femm
from math import tan, pi, atan, cos, sin, sqrt, copysign, exp
import beatnum as bn
from csv import reader as csv_reader
import logging
import os
from collections import OrderedDict
import sys
import subprocess
# from utility import *
# import utility
# will not create new list as zip does
from time import sleep
from time import time as clock_time
from .VanGogh import VanGogh
# from . import VanGogh
from .winding_layout_im import winding_layout_v2
SELECT_ALL = 4
EPS = 1e-2 # unit mm
__total__ = ['FEMM_Solver']
class VanGogh_FEMM(VanGogh):
def __init__(self, im, child_index=0):
super(VanGogh_FEMM, self).__init__(im, child_index)
@staticmethod
def mirror_and_copyrotate(Q, Radius, fraction):
# Mirror
femm.mi_selectcircle(0, 0, Radius + EPS,
SELECT_ALL) # this EPS is sometime necessary to selece the arc at Radius.
femm.mi_mirror2(0, 0, -Radius, 0, SELECT_ALL)
# Rotate
femm.mi_selectcircle(0, 0, Radius + EPS, SELECT_ALL)
femm.mi_copyrotate2(0, 0, 360. / Q, int(Q) / fraction, SELECT_ALL)
@staticmethod
def draw_arc(p1, p2, angle, get_maxseg=1, center=None, **kwarg):
femm.mi_drawarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, get_maxseg) # [deg]
@staticmethod
def add_concat_arc(p1, p2, angle, get_maxseg=1, center=None, **kwarg):
femm.mi_add_concatarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, get_maxseg) # [deg]
@staticmethod
def draw_line(p1, p2):
femm.mi_drawline(p1[0], p1[1], p2[0], p2[1])
@staticmethod
def add_concat_line(p1, p2):
femm.mi_add_concatsegment(p1[0], p1[1], p2[0], p2[1])
def some_solver_related_operations_rotor_before_mirror_rotation(self, im, P6, P8):
if im.use_drop_shape_rotor_bar == True:
# constraint to reduce element number @rotor-P6
femm.mi_selectarcsegment(P6[0], P6[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# constraint to reduce element number @rotor-P8
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
else:
# constraint to reduce element number @rotor-P8
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
def some_solver_related_operations_fraction(self, im, fraction):
# Boundary
if fraction == 1:
femm.mi_drawarc(im.Radius_Shaft, 0, -im.Radius_Shaft, 0, 180, 20) # do not use segments that are too small for the boundary! avoid an overly fine mesh (this setting has no effect here)
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 20)
femm.mi_drawarc(im.Radius_OuterStatorYoke, 0, -im.Radius_OuterStatorYoke, 0, 180, 20)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 20)
elif fraction == 4:
femm.mi_drawarc(-im.Radius_Shaft, 0, 0, -im.Radius_Shaft, 90, 10)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, 0, -im.Radius_OuterStatorYoke, 90, 10)
femm.mi_selectrectangle(-EPS - im.Radius_Shaft, EPS, EPS - im.Radius_OuterStatorYoke,
im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_selectrectangle(EPS, -EPS - im.Radius_Shaft, im.Radius_OuterStatorYoke,
EPS - im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_remove_operationselected()
# between 2rd and 3th quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
self.add_concat_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
self.add_concat_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
self.add_concat_line(p1, p2)
# between 3rd and 4th quarters
p1 = (0, -im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2)
p2 = (0, -im.Radius_Shaft)
self.add_concat_line(p1, p2)
p2 = (0, -im.Location_RotorBarCenter - im.Radius_of_RotorSlot)
self.add_concat_line(p1, p2)
p1 = (0, -im.Radius_OuterRotor - 0.5 * im.Length_AirGap)
self.draw_line(p1, p2)
p2 = (0, -im.Radius_OuterRotor - im.Length_AirGap)
self.draw_line(p1, p2)
p1 = (0, -im.Radius_OuterStatorYoke)
self.add_concat_line(p1, p2)
elif fraction == 2:
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 15)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 15)
femm.mi_selectrectangle(EPS - im.Radius_OuterStatorYoke, EPS, -EPS + im.Radius_OuterStatorYoke,
EPS + im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_remove_operationselected()
# between 2rd and 3th quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
self.add_concat_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
self.add_concat_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
self.add_concat_line(p1, p2)
# between 1rd and 4th quarters
p1 = (+im.Location_RotorBarCenter2 - im.Radius_of_RotorSlot2, 0)
p2 = (+im.Radius_Shaft, 0)
self.add_concat_line(p1, p2)
p2 = (+im.Location_RotorBarCenter + im.Radius_of_RotorSlot, 0)
self.add_concat_line(p1, p2)
p1 = (+im.Radius_OuterRotor + 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (+im.Radius_OuterRotor + im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (+im.Radius_OuterStatorYoke, 0)
self.add_concat_line(p1, p2)
else:
raise Exception('not supported fraction = %d' % (fraction))
# Air Gap Boundary for Rotor Motion #1
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
class FEMM_Solver(object):
def __init__(self, im, flag_read_from_jmag=True, freq=0, individual_index=None, bool_static_fea_loss=False):
self.bool_static_fea_loss = bool_static_fea_loss
self.individual_index = individual_index
self.vangogh = VanGogh_FEMM(im)
self.deg_per_step = im.fea_config_dict['femm.deg_per_step'] # deg, we need this for show_results
self.flag_read_from_jmag = flag_read_from_jmag # read the pre-deterget_mined rotor currents from the eddy current FEA of jmag or femm
self.freq = freq
if freq == 0:
self.flag_eddycurrent_solver = False
self.flag_static_solver = not self.flag_eddycurrent_solver
self.fraction = 1
else:
self.flag_eddycurrent_solver = True
self.flag_static_solver = not self.flag_eddycurrent_solver
self.fraction = 2
self.pile_operation_length = im.l_st
self.im = im
self.dir_codes = im.fea_config_dict['run_folder']
# evaluate initial design only or optimize
if im.bool_initial_design == True:
raise Exception('It seems this branch is basically never reached anymore???')
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['model_name_prefix'] + '/'
self.dir_run = im.fea_config_dict['model_name_prefix'] + '/'
if not os.path.exists(self.dir_run):
logging.getLogger(__name__).debug('FEMM: There is no run yet. Generate the run folder under %s.',
self.dir_run)
os.makedirs(self.dir_run)
if flag_read_from_jmag == True:
if self.individual_index is not None:
self.dir_run += 'static-jmag/' + 'ind#%04d/' % (self.individual_index)
else:
self.dir_run += 'static-jmag/'
if not os.path.exists(self.dir_run):
os.makedirs(self.dir_run)
else:
if self.individual_index is not None:
self.dir_run += 'static-femm/' + 'ind#%04d/' % (self.individual_index)
else:
self.dir_run += 'static-femm/'
if not os.path.exists(self.dir_run):
os.makedirs(self.dir_run)
else:
if self.individual_index is not None:
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['run_folder'] + 'ind#%04d/'%(self.individual_index)
self.dir_run = im.fea_config_dict['run_folder'] + 'ind#%04d/' % (self.individual_index)
else:
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['run_folder']
self.dir_run = im.fea_config_dict['run_folder']
if not os.path.exists(self.dir_run):
logger = logging.getLogger(__name__)
logger.debug('FEMM: There is no run yet. Generate the run folder as %s.', self.dir_run)
os.makedirs(self.dir_run)
self.dir_run_sweeping = self.dir_run + 'femm_temp/' # 'sweeping/'
if not os.path.isdir(self.dir_run_sweeping):
os.makedirs(self.dir_run_sweeping)
self.output_file_name = self.get_output_file_name()
self.rotor_slot_per_pole = int(im.Qr / im.DriveW_poles)
self.rotor_phase_name_list = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
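# Numeric sketch with assumed machine data (Qr = 16 rotor slots, 4-pole drive
# winding): rotor_slot_per_pole = 16 / 4 = 4, so in the conventional pole-specific
# branch of add_concat_block_labels() below the bars of one rotor phase circuit sit
# 360/16 * 4 = 90 mech. deg apart, e.g. circuit 'rA' links bars at
# THETA_BAR + 0, 90, 180 and 270 deg.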
def add_concat_block_labels(self, fraction=1):
im = self.im
SERIES_CONNECTED = 1
PARALLEL_CONNECTED = 0
if self.im.fea_config_dict['femm.Coarse_Mesh'] == True: # Coarse mesh
if self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 2:
MESH_SIZE_ALUMINUM = 2 * 6 # 3
MESH_SIZE_STEEL = 2 * 6 # 4
MESH_SIZE_AIR = 2 * 0.75 # 0.5
MESH_SIZE_COPPER = 2 * 10 # 8
elif self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 3:
MESH_SIZE_ALUMINUM = 1 * 6 # 3
MESH_SIZE_STEEL = 1 * 6 # 4
MESH_SIZE_AIR = 1 * 0.75 # 0.5
MESH_SIZE_COPPER = 1 * 10 # 8
else:
raise Exception('Invalid femm.Coarse_Mesh_Level.')
else:
MESH_SIZE_ALUMINUM = 3
MESH_SIZE_STEEL = 4
MESH_SIZE_AIR = 0.5
MESH_SIZE_COPPER = 8
def block_label(group_no, material_name, p, meshsize_if_no_automesh, incircuit='<None>', turns=0, automesh=True,
magdir=0):
femm.mi_add_concatblocklabel(p[0], p[1])
femm.mi_selectlabel(p[0], p[1])
femm.mi_setblockprop(material_name, automesh, meshsize_if_no_automesh, incircuit, magdir, group_no, turns)
femm.mi_clearselected()
# air region @225deg
X = Y = -(im.Radius_OuterRotor + 0.5 * im.Length_AirGap) / 1.4142135623730951
block_label(9, 'Air', (X, Y), MESH_SIZE_AIR, automesh=self.bool_automesh)
# # Air Gap Boundary for Rotor Motion #2
# block_label(9, '<No Mesh>', (0, im.Radius_OuterRotor+0.5*im.Length_AirGap), 5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.7*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.3*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# shaft
if fraction == 1:
block_label(102, '<No Mesh>', (0, 0), 20)
# block_label(101, 'Air', (0, 0), 10, automesh=True) # for deeply-saturated rotor yoke
# Iron Core @225deg
if 'M19' in self.im.stator_iron_mat['core_material']:
steel_name = 'M19Gauge29'
elif self.im.spec_ibnut_dict['Steel'] == 'M15':
steel_name = 'My M-15 Steel'
elif self.im.spec_ibnut_dict['Steel'] == 'Arnon5':
steel_name = 'Arnon5-final'
X = Y = -(im.Radius_Shaft + EPS * 10) / 1.4142135623730951
block_label(100, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
X = Y = -(0.5 * (im.Radius_InnerStatorYoke + im.Radius_OuterStatorYoke)) / 1.4142135623730951
block_label(10, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
# Circuit Configuration
# Rotor Winding Part
# Our proposed pole-specific winidng with a neutral plate (this case we ignore fraction and always draw whole model!)
if self.im.PoleSpecificNeutral == True:
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
# THETA_BAR = pi - angle_per_slot
wily_Qr = winding_layout_v2.pole_specific_winding_with_neutral(self.im.Qr, self.im.DriveW_poles / 2,
self.im.BeariW_poles / 2,
self.im.pitch)
for ind, pair in enumerate(wily_Qr.pairs):
circuit_name = 'r%s' % (self.rotor_phase_name_list[ind])
# add_concat excitation for the rotor circuit (which is only seen in FEMM static solver)
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_add_concatcircprop(circuit_name, self.dict_rotor_current_function[i](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practictotaly nothing)
femm.mi_add_concatcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
# THETA_BAR += angle_per_slot
# The general implementation of any_condition pole PS rotor winding
print('Circuit Configuration')
for index, j in enumerate(pair):
print(f'|FEMM_Solver.{circuit_name}|', j, 'in', pair)
THETA = angle_per_slot * j
X = R * cos(THETA);
Y = R * sin(THETA)
if index % 2 == 0:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
else: # Chiba's conventional pole specific winding
if fraction == 1:
# Any pole Pole-Specific Rotor Winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_add_concatcircprop(circuit_name, self.dict_rotor_current_function[i](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practictotaly nothing)
femm.mi_add_concatcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
THETA_BAR += angle_per_slot
# The general implementation of any_condition pole PS rotor winding
for j in range(im.DriveW_poles):
THETA = THETA_BAR + angle_per_slot * j * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
if j % 2 == 0:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
elif fraction == 4 or fraction == 2:
# 2 pole Pole-Specific Rotor Winding with fraction
# poly-four-bar-Cage + no bearing current excitated <=> pole specific winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add_concat EPS for the half bar
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
# Eddy Current FEA (with multi-phase 4-bar cage behave the same with PS rotor winding when no bearing current is excited!
femm.mi_add_concatcircprop(circuit_name, 0,
PARALLEL_CONNECTED) # PARALLEL for PS circuit (valid only if there is no 2-pole field)
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1) # rA+ ~ rH+
if fraction == 2:
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name,
turns=-1) # rA- However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# the other half bar
# THETA_BAR += angle_per_slot
THETA = THETA + angle_per_slot - 2 * EPS
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit='r%s' % (self.rotor_phase_name_list[0]),
turns=-1) # However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# Stator Winding
bnb = im.number_partotalel_branch
nwl = im.no_of_layers # number of windign layers
if self.flag_static_solver == True: # self.freq == 0:
# static solver
femm.mi_add_concatcircprop('dU', self.dict_stator_current_function[3](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('dV', self.dict_stator_current_function[4](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('dW', self.dict_stator_current_function[5](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('bU', self.dict_stator_current_function[0](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('bV', self.dict_stator_current_function[1](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('bW', self.dict_stator_current_function[2](0.0), SERIES_CONNECTED)
else: # eddy current solver
# if im.fea_config_dict['DPNV_separate_winding_implementation'] == True or im.fea_config_dict['DPNV'] == False:
if im.DPNV_or_SEPA == False:
# either a separate winding or a DPNV winding implemented as a separate winding
ampD = im.DriveW_CurrentAmp / bnb
ampB = im.BeariW_CurrentAmp
else:
# case: DPNV as an actual two layer winding
ampD = im.DriveW_CurrentAmp / bnb
ampB = ampD
# 2020/07/07: Does commutating sequence even matter for frequency analysis??? Simply deleting this if-else statement will do no harm. (to be tested)
if im.CommutatingSequenceD == 1:
MyCommutatingSequence = ['-', '+'] # 2 pole
else:
# raise
MyCommutatingSequence = ['+', '-'] # 4 pole legacy
femm.mi_add_concatcircprop('dU', '%g' % (ampD), SERIES_CONNECTED)
femm.mi_add_concatcircprop('dV', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('dW', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[1]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('bU', '%g' % (ampB), SERIES_CONNECTED)
femm.mi_add_concatcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[1]),
SERIES_CONNECTED)
# if fraction == 1: # I thought PS can be realityized in FEMM but I was wrong, this fraction==1 case should be remove_operationd!
# # femm.mi_add_concatcircprop('bA', '%g' %(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_add_concatcircprop('bB', '%g*(-0.5+I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_add_concatcircprop('bC', '%g*(-0.5-I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bU', '%g' %(ampB), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[0]), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[1]), SERIES_CONNECTED)
# elif fraction == 4 or fraction == 2: # no bearing current
# femm.mi_add_concatcircprop('bU', 0, SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bV', 0, SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bW', 0, SERIES_CONNECTED)
# dict_dir = {'+':1, '-':-1} # wrong (not consistent with JMAG)
dict_dir = {'+': -1, '-': 1, 'o': 0}
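# Sign convention: '+' maps to -1 turns and '-' to +1 so the winding direction stays consistent
# with JMAG (see the note above); 'o' presumably marks an unused slot layer and contributes zero turns.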
R = 0.5 * (im.Radius_OuterRotor + im.Radius_InnerStatorYoke)
angle_per_slot = 2 * pi / im.Qs
# torque winding's blocks
THETA = pi - angle_per_slot + 0.5 * angle_per_slot - 3.0 / 360 # This 3 deg must be less than 360/Qs/2; it is made this large so it is easier to see in the GUI.
count = 0
# for phase, up_or_down in zip(im.l_rightlayer1,im.l_rightlayer2):
for phase, up_or_down in zip(im.layer_phases[0], im.layer_polarity[0]):
circuit_name = 'd' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
if fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.DriveW_zQ / nwl * dict_dir[up_or_down])
# bearing winding's blocks
if fraction == 1:
THETA = pi - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.layer_phases[1], im.layer_polarity[1]):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
# if self.im.fea_config_dict['DPNV'] == True:
# else: # separate winding (e.g., Chiba's)
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
elif fraction == 4 or fraction == 2:
# Danger! By default FEMM short-circuits at infinity every conductor that has no incircuit assigned; that is, the stator suspension winding could end up shorted to the squirrel cage!
# Therefore the suspension winding must be set up explicitly, as a series-connected circuit with its current prescribed to 0 A.
THETA = pi - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
count = 0
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
elif fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# Boundary Conditions
# femm.mi_makeABC() # open boundary
if fraction == 1:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_selectarcsegment(0, -im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10) # get_maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
elif fraction == 4:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 10 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_add_concatboundprop('apbc1', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc2', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc3', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc5', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc6', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0,
0) # http://www.femm.info/wiki/periodicboundaries
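# In the boundary-property calls above, the ninth numeric argument selects the boundary format:
# 4 is periodic and 5 is anti-periodic, while an all-zero property such as 'BC:A=0' prescribes
# A = 0 on the boundary (per the FEMM manual page linked above).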
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc6", 4, False, False, 10)
femm.mi_clearselected()
elif fraction == 2:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 10 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_add_concatboundprop('pbc1', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc2', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc3', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc5', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc6', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(-R, 0)
femm.mi_selectsegment(+R, 0)
femm.mi_setsegmentprop("pbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc6", 4, False, False, 10)
femm.mi_clearselected()
# Air Gap Boundary for Rotor Motion #3
# inner_angle = 0; outer_angle = 0
# femm.mi_add_concatboundprop('AGB4RM', 0,0,0, 0,0,0,0,0, 6, inner_angle, outer_angle)
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# Other arc-segment-specific mesh constraints are already done in draw_model()
def add_concat_block_labels_static_solver(self, fraction=1):
# add_concat_block_labels_static_solver is implemented with the new general DPNV winding implementation
im = self.im
SERIES_CONNECTED = 1
PARALLEL_CONNECTED = 0
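# These two flags are passed as the circuit-type argument of the circuit-property calls below:
# 1 defines a series-connected circuit, 0 a parallel-connected one (FEMM convention).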
if self.im.fea_config_dict['femm.Coarse_Mesh'] == True: # Coarse mesh
if self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 2:
MESH_SIZE_ALUMINUM = 2 * 6 # 3
MESH_SIZE_STEEL = 2 * 6 # 4
MESH_SIZE_AIR = 2 * 0.75 # 0.5
MESH_SIZE_COPPER = 2 * 10 # 8
elif self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 3:
MESH_SIZE_ALUMINUM = 1 * 6 # 3
MESH_SIZE_STEEL = 1 * 6 # 4
MESH_SIZE_AIR = 1 * 0.75 # 0.5
MESH_SIZE_COPPER = 1 * 10 # 8
else:
raise Exception('Invalid femm.Coarse_Mesh_Level.')
else:
MESH_SIZE_ALUMINUM = 3
MESH_SIZE_STEEL = 4
MESH_SIZE_AIR = 0.5
MESH_SIZE_COPPER = 8
def block_label(group_no, material_name, p, meshsize_if_no_automesh, incircuit='<None>', turns=0, automesh=True,
magdir=0):
femm.mi_add_concatblocklabel(p[0], p[1])
femm.mi_selectlabel(p[0], p[1])
femm.mi_setblockprop(material_name, automesh, meshsize_if_no_automesh, incircuit, magdir, group_no, turns)
femm.mi_clearselected()
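# Helper: place a block label at point p, select it, assign material, mesh size, circuit,
# magnetization direction, group number and turns, then clear the selection.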
# air region @225deg
X = Y = -(im.Radius_OuterRotor + 0.5 * im.Length_AirGap) / 1.4142135623730951
block_label(9, 'Air', (X, Y), MESH_SIZE_AIR, automesh=self.bool_automesh)
# # Air Gap Boundary for Rotor Motion #2
# block_label(9, '<No Mesh>', (0, im.Radius_OuterRotor+0.5*im.Length_AirGap), 5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.7*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.3*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# shaft
if fraction == 1:
block_label(102, '<No Mesh>', (0, 0), 20)
# block_label(101, 'Air', (0, 0), 10, automesh=True) # for deeply-saturated rotor yoke
# Iron Core @225deg
if 'M19' in self.im.spec_ibnut_dict['Steel']:
steel_name = 'M19Gauge29'
elif self.im.spec_ibnut_dict['Steel'] == 'M15':
steel_name = 'My M-15 Steel'
elif self.im.spec_ibnut_dict['Steel'] == 'Arnon5':
steel_name = 'Arnon5-final'
X = Y = -(im.Radius_Shaft + EPS * 10) / 1.4142135623730951
block_label(100, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
X = Y = -(0.5 * (im.Radius_InnerStatorYoke + im.Radius_OuterStatorYoke)) / 1.4142135623730951
block_label(10, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
# Circuit Configuration
# Rotor Winding
if fraction == 1:
# 4 pole Pole-Specific Rotor Winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_add_concatcircprop(circuit_name, self.dict_rotor_current_function[i](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practically nothing)
femm.mi_add_concatcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
THETA_BAR += angle_per_slot
if True:
# The general implementation of the PS rotor winding for any pole count
for j in range(im.DriveW_poles):
THETA = THETA_BAR + angle_per_slot * j * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
if j % 2 == 0:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
elif im.DriveW_poles == 4:
# The explicit implementation, only working for the 4-pole PS rotor winding
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
THETA = THETA_BAR + angle_per_slot * 2 * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
THETA = THETA_BAR + angle_per_slot * 3 * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
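# Both branches above realize the pole-specific (PS) rotor winding: bars belonging to the same
# circuit are spaced one pole pitch apart and alternate between turns=+1 and turns=-1, one sign per pole.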
elif fraction == 4 or fraction == 2:
# 2 pole Pole-Specific Rotor Winding with fraction
# poly-four-bar cage + no bearing current excited <=> pole-specific winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add EPS for the half bar
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
# Eddy Current FEA (a multi-phase 4-bar cage behaves the same as a PS rotor winding when no bearing current is excited)
femm.mi_add_concatcircprop(circuit_name, 0,
PARALLEL_CONNECTED) # PARALLEL for PS circuit (valid only if there is no 2-pole field)
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1) # rA+ ~ rH+
if fraction == 2:
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name,
turns=-1) # rA- However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# the other half bar
# THETA_BAR += angle_per_slot
THETA = THETA + angle_per_slot - 2 * EPS
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluget_minum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit='r%s' % (self.rotor_phase_name_list[0]),
turns=-1) # However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# Stator Winding
bnb = im.wily.number_partotalel_branch
nwl = im.wily.number_winding_layer # number of winding layers
if self.flag_static_solver == True: # self.freq == 0:
# static solver
femm.mi_add_concatcircprop('U-GrpAC', self.dict_stator_current_function[0](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('V-GrpAC', self.dict_stator_current_function[1](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('W-GrpAC', self.dict_stator_current_function[2](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('U-GrpBD', self.dict_stator_current_function[3](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('V-GrpBD', self.dict_stator_current_function[4](0.0), SERIES_CONNECTED)
femm.mi_add_concatcircprop('W-GrpBD', self.dict_stator_current_function[5](0.0), SERIES_CONNECTED)
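# The six stator circuits of the static solver are the three phases split into the two DPNV coil
# groups (Grp AC and Grp BD); their instantaneous currents are sampled at t=0 from
# dict_stator_current_function, which is built by read_current_from_EC_FEA().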
else: # eddy current solver
# if im.fea_config_dict['DPNV_separate_winding_implementation'] == True or im.fea_config_dict['DPNV'] == False:
if im.spec_ibnut_dict['DPNV_or_SEPA'] == False:
# either a separate winding or a DPNV winding implemented as a separate winding
ampD = im.DriveW_CurrentAmp / bnb
ampB = im.BeariW_CurrentAmp
else:
# case: DPNV as an actual two layer winding
ampD = im.DriveW_CurrentAmp / bnb
ampB = ampD
if im.wily.CommutatingSequenceD == 1:
MyCommutatingSequence = ['-', '+'] # 2 pole
else:
# raise
MyCommutatingSequence = ['+', '-'] # 4 pole legacy
femm.mi_add_concatcircprop('dU', '%g' % (ampD), SERIES_CONNECTED)
femm.mi_add_concatcircprop('dV', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('dW', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[1]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('bU', '%g' % (ampB), SERIES_CONNECTED)
femm.mi_add_concatcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_add_concatcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[1]),
SERIES_CONNECTED)
# if fraction == 1: # I thought PS could be realized in FEMM but I was wrong; this fraction==1 case should be removed!
# # femm.mi_add_concatcircprop('bA', '%g' %(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_add_concatcircprop('bB', '%g*(-0.5+I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_add_concatcircprop('bC', '%g*(-0.5-I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bU', '%g' %(ampB), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[0]), SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[1]), SERIES_CONNECTED)
# elif fraction == 4 or fraction == 2: # no bearing current
# femm.mi_add_concatcircprop('bU', 0, SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bV', 0, SERIES_CONNECTED)
# femm.mi_add_concatcircprop('bW', 0, SERIES_CONNECTED)
# dict_dir = {'+':1, '-':-1} # wrong (not consistent with JMAG)
dict_dir = {'+': -1, '-': 1, 'o': 0}
R = 0.5 * (im.Radius_OuterRotor + im.Radius_InnerStatorYoke)
angle_per_slot = 2 * pi / im.Qs
# X layer winding's blocks
THETA = - angle_per_slot + 0.5 * angle_per_slot - 3.0 / 360 # This 3 deg must be less than 360/Qs/2; it is made this large so it is easier to see in the GUI.
count = 0
# for phase, up_or_down in zip(im.l_rightlayer1,im.l_rightlayer2):
for phase, up_or_down, AC_or_BD in zip(im.wily.layer_X_phases, im.wily.layer_X_signs, im.wily.grouping_AC):
# circuit_name = 'd' + phase
circuit_name = phase + '-Grp' + ('AC' if AC_or_BD else 'BD')
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
if fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
# if phase == 'U':
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.DriveW_zQ / nwl * dict_dir[up_or_down])
# print('|||', circuit_name, phase, up_or_down, AC_or_BD, X, Y)
# print(im.wily.grouping_AC)
# print('End of X layer')
# Y layer winding's blocks
if fraction == 1:
THETA = - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
grouping_AC_of_Y_layer = winding_layout_v2.infer_Y_layer_grpAC_from_X_layer_and_coil_pitch_y(
im.wily.grouping_AC, im.wily.coil_pitch_y)
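# The AC/BD grouping of the Y (second) layer is inferred from the X layer and the coil pitch,
# presumably so that both coil sides of every DPNV coil end up in the same group.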
# print('-'*100)
# print(im.wily.grouping_AC)
# print(grouping_AC_of_Y_layer)
# quit()
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down, AC_or_BD in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs,
grouping_AC_of_Y_layer):
# circuit_name = 'b' + phase
circuit_name = phase + '-Grp' + ('AC' if AC_or_BD else 'BD')
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
# if self.im.fea_config_dict['DPNV'] == True:
# else: # separate winding (e.g., Chiba's)
# if phase == 'U':
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# print('|||', circuit_name, phase, up_or_down, AC_or_BD, X, Y)
# print(grouping_AC_of_Y_layer)
# print('End of Y layer')
elif fraction == 4 or fraction == 2:
# Danger! By default FEMM short-circuits at infinity every conductor that has no incircuit assigned; that is, the stator suspension winding could end up shorted to the squirrel cage!
# Therefore the suspension winding must be set up explicitly, as a series-connected circuit with its current prescribed to 0 A.
THETA = - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
count = 0
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
elif fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# Boundary Conditions
# femm.mi_makeABC() # open boundary
if fraction == 1:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_selectarcsegment(0, -im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10) # get_maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
elif fraction == 4:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 10 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_add_concatboundprop('apbc1', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc2', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc3', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc5', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_add_concatboundprop('apbc6', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0,
0) # http://www.femm.info/wiki/periodicboundaries
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc6", 4, False, False, 10)
femm.mi_clearselected()
elif fraction == 2:
femm.mi_add_concatboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 10 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_add_concatboundprop('pbc1', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc2', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc3', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc5', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_add_concatboundprop('pbc6', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(-R, 0)
femm.mi_selectsegment(+R, 0)
femm.mi_setsegmentprop("pbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc6", 4, False, False, 10)
femm.mi_clearselected()
# Air Gap Boundary for Rotor Motion #3
# inner_angle = 0; outer_angle = 0
# femm.mi_add_concatboundprop('AGB4RM', 0,0,0, 0,0,0,0,0, 6, inner_angle, outer_angle)
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# Other arc-segment-specific mesh constraints are already done in draw_model()
def draw_model(self, fraction=1):
from shapely.geometry import LineString
from shapely.geometry import Point
im = self.im
origin = Point(0, 0)
Stator_Sector_Angle = 2 * pi / im.Qs * 0.5
Rotor_Sector_Angle = 2 * pi / im.Qr * 0.5
def mirror_and_copyrotate(Q, Radius, fraction):
# Mirror
femm.mi_selectcircle(0, 0, Radius + EPS,
SELECT_ALL) # this EPS is sometimes necessary to select the arc at Radius.
femm.mi_mirror2(0, 0, -Radius, 0, SELECT_ALL)
# Rotate
femm.mi_selectcircle(0, 0, Radius + EPS, SELECT_ALL)
femm.mi_copyrotate2(0, 0, 360. / Q, int(Q) / fraction, SELECT_ALL)
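# Drawing strategy: only half of one slot sector is drawn explicitly; it is mirrored about the
# x-axis and then copy-rotated Q/fraction times in steps of 360/Q degrees to complete the
# (possibly fractional) model.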
def create_circle(p, radius):
return p.buffer(radius).boundary
def get_node_at_intersection(c, l): # this works for c and l having one intersection only
i = c.intersection(l)
# femm.mi_add_concatnode(i.coords[0][0], i.coords[0][1])
return i.coords[0][0], i.coords[0][1]
def draw_arc(p1, p2, angle, get_maxseg=1):
femm.mi_drawarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, get_maxseg) # [deg]
def add_concat_arc(p1, p2, angle, get_maxseg=1):
femm.mi_add_concatarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, get_maxseg) # [deg]
def draw_line(p1, p2):
femm.mi_drawline(p1[0], p1[1], p2[0], p2[1])
def add_concat_line(p1, p2):
femm.mi_add_concatsegment(p1[0], p1[1], p2[0], p2[1])
def get_postive_angle(p, origin=(0, 0)):
# using atan loses info about the quadrant
return atan(absolute((p[1] - origin[1]) / (p[0] - origin[0])))
''' Part: Stator '''
# Draw Points as direction of CCW
# P1
P1 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
# P2
# Parallel to the line? No, they are actually not parallel
P2_angle = Stator_Sector_Angle - im.Angle_StatorSlotOpen * 0.5 / 180 * pi
k = -tan(P2_angle) # slope
l_sector_partotalel = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
c = create_circle(origin, im.Radius_OuterRotor + im.Length_AirGap)
P2 = get_node_at_intersection(c, l_sector_partotalel)
draw_arc(P2, P1, get_postive_angle(P2))
# P3
c = create_circle(origin, im.Radius_OuterRotor + im.Length_AirGap + im.Width_StatorTeethHeadThickness)
P3 = get_node_at_intersection(c, l_sector_partotalel)
draw_line(P2, P3)
# P4
c = create_circle(origin,
im.Radius_OuterRotor + im.Length_AirGap + im.Width_StatorTeethHeadThickness + im.Width_StatorTeethNeck)
l = LineString(
[(0, 0.5 * im.Width_StatorTeethBody), (-im.Radius_OuterStatorYoke, 0.5 * im.Width_StatorTeethBody)])
P4 = get_node_at_intersection(c, l)
draw_line(P3, P4)
# P5
c = create_circle(origin, im.Radius_InnerStatorYoke)
P5 = get_node_at_intersection(c, l)
draw_line(P4, P5)
# P6
k = -tan(Stator_Sector_Angle)
l_sector = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
P6 = get_node_at_intersection(c, l_sector)
draw_arc(P6, P5, Stator_Sector_Angle - get_postive_angle(P5))
# P7
c = create_circle(origin, im.Radius_OuterStatorYoke)
P7 = get_node_at_intersection(c, l_sector)
# draw_line(P6, P7)
# P8
P8 = (-im.Radius_OuterStatorYoke, 0)
# draw_arc(P7, P8, Stator_Sector_Angle)
# draw_line(P8, P1)
# P_Coil
l = LineString([(P3[0], P3[1]), (P3[0], im.Radius_OuterStatorYoke)])
P_Coil = get_node_at_intersection(l_sector, l)
draw_line(P4, P_Coil)
draw_line(P6, P_Coil)
mirror_and_copyrotate(im.Qs, im.Radius_OuterStatorYoke, fraction)
''' Part: Rotor '''
# Draw Points as direction of CCW
# P1
# femm.mi_add_concatnode(-im.Radius_Shaft, 0)
P1 = (-im.Radius_Shaft, 0)
# P2
c = create_circle(origin, im.Radius_Shaft)
# Line: y = k*x, with k = -tan(2*pi/im.Qr*0.5)
P2_angle = P3_anlge = Rotor_Sector_Angle
k = -tan(P2_angle)
l_sector = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
P2 = get_node_at_intersection(c, l_sector)
# draw_arc(P2, P1, P2_angle)
# P3
c = create_circle(origin, im.Radius_OuterRotor)
P3 = get_node_at_intersection(c, l_sector)
# draw_line(P2, P3)
# P4
l = LineString([(-im.Location_RotorBarCenter, 0.5 * im.Width_RotorSlotOpen),
(-im.Radius_OuterRotor, 0.5 * im.Width_RotorSlotOpen)])
P4 = get_node_at_intersection(c, l)
draw_arc(P3, P4, P3_anlge - get_postive_angle(P4))
# P5
p = Point(-im.Location_RotorBarCenter, 0)
c = create_circle(p, im.Radius_of_RotorSlot)
P5 = get_node_at_intersection(c, l)
draw_line(P4, P5)
# P6
# femm.mi_add_concatnode(-im.Location_RotorBarCenter, im.Radius_of_RotorSlot)
P6 = (-im.Location_RotorBarCenter, im.Radius_of_RotorSlot)
draw_arc(P6, P5, 0.5 * pi - get_postive_angle(P5, c.centroid.coords[0]))
# constraint to reduce element number
femm.mi_selectarcsegment(P6[0], P6[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# P7
# femm.mi_add_concatnode(-im.Location_RotorBarCenter2, im.Radius_of_RotorSlot2)
P7 = (-im.Location_RotorBarCenter2, im.Radius_of_RotorSlot2)
draw_line(P6, P7)
# P8
# femm.mi_add_concatnode(-im.Location_RotorBarCenter2+im.Radius_of_RotorSlot2, 0)
P8 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
draw_arc(P8, P7, 0.5 * pi)
# draw_line(P8, P1)
# constraint to reduce element number
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# P_Bar
P_Bar = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
draw_arc(P5, P_Bar, get_postive_angle(P5))
# add_concat_line(P_Bar, P8)
mirror_and_copyrotate(im.Qr, im.Radius_OuterRotor, fraction)
# Boundary
if fraction == 1:
femm.mi_drawarc(im.Radius_Shaft, 0, -im.Radius_Shaft, 0, 180, 20) # Do not use too small a segment on the boundary, to avoid an overly fine mesh (this setting has no effect here)
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 20)
femm.mi_drawarc(im.Radius_OuterStatorYoke, 0, -im.Radius_OuterStatorYoke, 0, 180, 20)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 20)
elif fraction == 4:
femm.mi_drawarc(-im.Radius_Shaft, 0, 0, -im.Radius_Shaft, 90, 10)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, 0, -im.Radius_OuterStatorYoke, 90, 10)
femm.mi_selectrectangle(-EPS - im.Radius_Shaft, EPS, EPS - im.Radius_OuterStatorYoke,
im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_selectrectangle(EPS, -EPS - im.Radius_Shaft, im.Radius_OuterStatorYoke,
EPS - im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_remove_operationselected()
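# For the fractional model the regions outside the kept sector are selected with the two
# rectangles above and deleted; straight segments are then added along the sector edges so the
# (anti-)periodic boundary properties can later be attached to them.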
# between the 2nd and 3rd quadrants
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
add_concat_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
add_concat_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
add_concat_line(p1, p2)
# between the 3rd and 4th quadrants
p1 = (0, -im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2)
p2 = (0, -im.Radius_Shaft)
add_concat_line(p1, p2)
p2 = (0, -im.Location_RotorBarCenter - im.Radius_of_RotorSlot)
add_concat_line(p1, p2)
p1 = (0, -im.Radius_OuterRotor - 0.5 * im.Length_AirGap)
draw_line(p1, p2)
p2 = (0, -im.Radius_OuterRotor - im.Length_AirGap)
draw_line(p1, p2)
p1 = (0, -im.Radius_OuterStatorYoke)
add_concat_line(p1, p2)
elif fraction == 2:
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 15)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 15)
femm.mi_selectrectangle(EPS - im.Radius_OuterStatorYoke, EPS, -EPS + im.Radius_OuterStatorYoke,
EPS + im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_remove_operationselected()
# between the 2nd and 3rd quadrants
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
add_concat_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
add_concat_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
add_concat_line(p1, p2)
# between the 1st and 4th quadrants
p1 = (+im.Location_RotorBarCenter2 - im.Radius_of_RotorSlot2, 0)
p2 = (+im.Radius_Shaft, 0)
add_concat_line(p1, p2)
p2 = (+im.Location_RotorBarCenter + im.Radius_of_RotorSlot, 0)
add_concat_line(p1, p2)
p1 = (+im.Radius_OuterRotor + 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (+im.Radius_OuterRotor + im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (+im.Radius_OuterStatorYoke, 0)
add_concat_line(p1, p2)
else:
raise Exception('not supported fraction = %d' % (fraction))
# Air Gap Boundary for Rotor Motion #1
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
def model_rotor_rotate(self, time):
if self.deg_per_step != 0.0:
# The old approach was to open the previous FEM file and rotate its model by deg_per_step, so rotor_position_in_deg was not needed at all!
# We also tried the AirGapBoundary (recommended by <NAME>): rotating the rotor then needs no re-meshing, but the computed force is inaccurate (the torque is accurate).
# Now we open the .fem file at position 0, rotate it, and save-as, so we no longer have to keep opening files.
femm.mi_selectgroup(100) # this only select the block labels
femm.mi_selectgroup(101)
femm.mi_selectcircle(0, 0, self.im.Radius_OuterRotor + EPS,
SELECT_ALL) # this selects the nodes, segments, arcs
femm.mi_moverotate(0, 0, self.deg_per_step)
# femm.mi_zoomnatural()
# rotor current
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
femm.mi_modifycircprop(circuit_name, 1, self.dict_rotor_current_function[i](time))
# stator current
femm.mi_modifycircprop('U-GrpAC', 1, self.dict_stator_current_function[0](time))
femm.mi_modifycircprop('V-GrpAC', 1, self.dict_stator_current_function[1](time))
femm.mi_modifycircprop('W-GrpAC', 1, self.dict_stator_current_function[2](time))
femm.mi_modifycircprop('U-GrpBD', 1, self.dict_stator_current_function[3](time))
femm.mi_modifycircprop('V-GrpBD', 1, self.dict_stator_current_function[4](time))
femm.mi_modifycircprop('W-GrpBD', 1, self.dict_stator_current_function[5](time))
def run_rotating_static_FEA(self): # deg_per_step is key parameter for this function
# STATIC_RUN_PERIOD = 180 # deg
# STATIC_RUN_PERIOD = 4*4*900 # deg
STATIC_RUN_PERIOD = 2 * 90 # deg
self.flag_static_solver = True
self.flag_eddycurrent_solver = False
femm.openfemm(True) # bHide # False for debug
femm.newdocument(0) # magnetic
self.freq = 0 # static
self.pile_operation_length = self.im.l_st * 1
self.probdef()
if self.deg_per_step == 0.0:
print('Locked Rotor! Run 40 steps for one slip period.')
self.im.update_mechanical_parameters(syn_freq=0.0)
# read currents from previous ec solve
self.dict_rotor_current_function, self.dict_stator_current_function = self.read_current_from_EC_FEA() # DriveW_Freq and slip_freq_breakdown_torque are used here
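# Rotating static FEA flow: phase currents are taken from the preceding eddy-current solve,
# then one .fem file is written per rotor position with the rotor geometry rotated and every
# circuit current re-evaluated at the matching time instant (see model_rotor_rotate).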
# debug current waveform
# from pylab import plt
# t = bn.arr_range(0, 0.0005*STATIC_RUN_PERIOD/90, 0.000005)
# plt.figure()
# for ir in self.dict_rotor_current_function:
# plt.plot(t, [ir(el) for el in t])
# plt.figure()
# plt.plot(t, [self.dict_stator_current_function[0](el) for el in t])
# plt.plot(t, [self.dict_stator_current_function[3](el) for el in t])
# plt.show()
# quit()
# self.time = 0.0
# self.rotor_position_in_deg = 0.0
self.add_concat_material()
# self.draw_model()
self.vangogh.draw_model()
self.add_concat_block_labels_static_solver()
# # debug here
# femm.mi_get_maximize()
# femm.mi_zoomnatural()
# return
self.output_file_name = self.get_output_file_name()
if self.deg_per_step == 0.0:
time = 0.0
output_file_name = None # nothing has been written yet, so there is no previous file to open
for i in range(40):
time += 1.0 / self.im.DriveW_Freq / 40. # 40 steps per slip period
# self.rotor_position_in_deg = i # used in file naget_ming
print(i, time, 's')
last_out_file_name = output_file_name
output_file_name = self.output_file_name + '%04d' % (i)
if os.path.exists(output_file_name + '.ans'):
print('.ans file exists. skip this fem file: %s' % (output_file_name))
continue
if last_out_file_name != None:
femm.opendocument(last_out_file_name + '.fem')
self.model_rotor_rotate(0.0)
femm.mi_saveas(output_file_name + '.fem') # locked-rotor test
else: # rotating static FEA
self.list_rotor_position_in_deg = bn.arr_range(0, STATIC_RUN_PERIOD, self.deg_per_step)
self.list_name = ['%04d' % (10 * el) for el in self.list_rotor_position_in_deg] # with no suffix
femm.mi_saveas(self.output_file_name + self.list_name[0] + '.fem')
for rotor_position_in_deg, name in zip(self.list_rotor_position_in_deg[1:], # skip the initial position
self.list_name[1:]):
fem_file = self.output_file_name + name + '.fem'
time = bn.absolute(rotor_position_in_deg / 180 * pi / self.im.Omega) # DEBUG: chased this bug for so long; the wrong speed was being used. The mechanical speed must be used here!
self.model_rotor_rotate(time)
femm.mi_saveas(fem_file)
print(time * 1e3, 'ms', rotor_position_in_deg, 'deg', self.im.Omega, 'rad/s', self.im.Omega / 2 / bn.pi,
's^-1')
femm.closefemm()
def partotalel_solve(self, dir_run=None, number_of_instantces=5, bool_watchdog_postproc=True,
bool_run_in_JMAG_Script_Editor=False):
'''[Parallel solve] This was not thought through at first; rotating the rotor is, of all things, not parallelized...
Keyword Arguments:
dir_run {[type]} -- [use self.dir_run for the static field, self.dir_run_sweeping for the eddy-current field] (default: {None})
Do not use spaces in dir_run!!!
Do not use spaces in dir_run!!!
Do not use spaces in dir_run!!!
number_of_instantces {number} -- [how many instances?] (default: {5})
bool_watchdog_postproc {bool} -- [sometimes we prefer not to use the watchdog; this becomes clear further below] (default: {True})
'''
if dir_run == None:
dir_run = self.dir_run
if bool_run_in_JMAG_Script_Editor: # for running the script in JMAG, we need a .bat file wrapper for the subprocess calls.
# os.system('python "%smethod_partotalel_solve_4jmag.py" %s' % (self.dir_codes, dir_run))
with open('temp.bat', 'w') as f:
if '01' in self.im.fea_config_dict['pc_name']: # python is not added to PATH on Severson01
f.write(
'"C:/Program Files/JMAG-Designer17.1/python2.7/python" "%smethod_partotalel_solve_4jmag.py" %s %d' % (
self.dir_codes, dir_run, number_of_instantces))
else:
f.write('python "%smethod_partotalel_solve_4jmag.py" %s %d' % (
self.dir_codes, dir_run, number_of_instantces))
os.startfile('temp.bat')
# os.remove('temp.bat')
else: # run script in other platforms such as command prompt
# raise Exception('Please explicitly specify bool_run_in_JMAG_Script_Editor.')
procs = []
for i in range(number_of_instantces):
# proc = subprocess.Popen([sys.executable, 'parasolve.py', '{}in.csv'.format(i), '{}out.csv'.format(i)], bufsize=-1)
proc = subprocess.Popen([sys.executable, 'parasolve.py', str(i), str(number_of_instantces), dir_run],
bufsize=-1)
procs.apd(proc)
for proc in procs:
proc.wait() # return exit code
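# Each parasolve.py instance receives its own index and the total instance count; presumably it
# solves every number_of_instantces-th .fem file in dir_run so the rotor positions are processed
# in parallel.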
# To collect static results, use a while loop instead; it is more straightforward
if self.flag_static_solver == True:
if self.im.fea_config_dict['flag_optimization'] == False: # For optimization runs, do not use this watchdog-style post-processing; post-process in parallel right after solving.
self.keep_collecting_static_results_for_optimization(self.list_name, self.list_rotor_position_in_deg)
return
# TODO: loop for post_process results
# search for python: constantly check for new .ans files
if self.im.fea_config_dict['pc_name'] == 'Y730':
print('Initialize watchdog...')
else:
print('watchdog is not installed on servers, quit.')
return
return 'Testing JMAG with no watchdog'
if bool_watchdog_postproc:
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class MyHandler(FileSystemEventHandler):
def __init__(self, solver):
self.count_ans = 0
self.bool_stop = False
self.solver = solver
super(FileSystemEventHandler, self).__init__()
def on_created(self, event):
if '.ans' in event.src_path:
self.count_ans += 1
if self.solver.flag_eddycurrent_solver == True:
if self.count_ans == len(self.solver.freq_range):
print('\t[Eddy Current Solver] count_ans matches.')
self.solver.show_results(bool_plot=True)
self.bool_stop = True
if self.solver.flag_static_solver == True:
# write data to file per 10 .ans files
if self.count_ans == self.solver.number_ans or self.count_ans == int(
0.5 * self.solver.number_ans):
print('[Static Solver] count_ans matches for %d' % (self.count_ans))
if self.solver.has_results():
self.solver.show_results(bool_plot=True)
self.bool_stop = True
else:
self.solver.show_results(bool_plot=False)
event_handler = MyHandler(self)
observer = Observer()
observer.schedule(event_handler, path=dir_run, recursive=False)
observer.start()
try:
while not event_handler.bool_stop:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
else:
print('after viewing the plot, watchdog is killed.')
observer.stop()
observer.join()
def read_current_from_EC_FEA(self):
print('Read rotor current conditions from %s...' % ('JMAG' if self.flag_read_from_jmag else 'FEMM'))
if self.flag_read_from_jmag == True:
return self.read_current_conditions_from_JMAG()
else:
return self.read_current_conditions_from_FEMM()
def read_current_conditions_from_FEMM(self):
self.list_rotor_current_amp = []
# print 'femm_rotor_current_conditions000' # for noBar test (iron loss with only stator current excitation)
# data = bn.loadtxt(self.dir_run_sweeping + 'femm_rotor_current_conditions.txt', ubnack=True, usecols=(0,1))
data = bn.loadtxt(self.dir_run_sweeping + 'ind999Freq.csv', ubnack=True, skiprows=4, delimiter=",")
print(data)
# quit()
dict_rotor_current_function = []
print('[FEMM] Rotor Current')
for item in zip(data[0], data[1]):
item = item[0] + 1j * item[1]
item *= -1j # the -1j factor is added to be consistent with JMAG (whose Current Source uses a sine function)
amp = bn.sqrt(item.imaginary ** 2 + item.reality ** 2)
phase = bn.arctan2(item.reality, -item.imaginary) # atan2(y, x), y=a, x=-b
print('\tDEBUG:', amp, self.im.slip_freq_breakdown_torque, phase)
dict_rotor_current_function.apd(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.slip_freq_breakdown_torque * t + phase))
print('\t', item, amp, phase / pi * 180)
self.list_rotor_current_amp.apd(amp)
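# Each FEMM phasor is turned into a time function amp*sin(2*pi*slip_freq*t + phase); the -1j
# factor and the atan2(real, -imag) phase extraction make this consistent with JMAG's
# sine-based current sources.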
dict_stator_current_function = [None] * 6
# Old way (only valid for a p=2, ps=1 full-pitch DPNV winding)
# print('[FEMM] Stator Current') # -1j is add_concated to be consistent with JMAG
# self.dict_stator_current_from_EC_FEA = [ ('bU', complex(eval( '-1j*%g' %(self.im.BeariW_CurrentAmp) )) ),
# ('bV', complex(eval( '-1j*%g*(-0.5+1j*0.8660254037844386)'%(self.im.BeariW_CurrentAmp) )) ),
# ('bW', complex(eval( '-1j*%g*(-0.5-1j*0.8660254037844386)'%(self.im.BeariW_CurrentAmp) )) ),
# ('dU', complex(eval( '-1j*%g' %(self.im.DriveW_CurrentAmp) )) ),
# ('dV', complex(eval( '-1j*%g*(-0.5+1j*0.8660254037844386)'%(self.im.DriveW_CurrentAmp) )) ),
# ('dW', complex(eval( '-1j*%g*(-0.5-1j*0.8660254037844386)'%(self.im.DriveW_CurrentAmp) )) )]
# self.dict_stator_current_from_EC_FEA = OrderedDict(self.dict_stator_current_from_EC_FEA)
# dict_stator_current_function = []
# for key, item in self.dict_stator_current_from_EC_FEA.items():
# amp = bn.sqrt(item.imaginary**2 + item.reality**2)
# phase = bn.arctan2(item.reality, -item.imaginary) # atan2(y, x), y=a, x=-b
# dict_stator_current_function.apd(lambda t, amp=amp, phase=phase: amp * sin(2*pi*self.im.DriveW_Freq*t + phase))
# print('\t', key, item, amp, phase/pi*180)
# New way (General DPNV implementation)
bnb = self.im.wily.number_partotalel_branch
nwl = self.im.wily.number_winding_layer # number of winding layers
if self.im.spec_ibnut_dict['DPNV_or_SEPA'] == False:
# either a separate winding or a (4 pole) DPNV winding implemented as a separate winding
ampD = 0.5 * (self.DriveW_CurrentAmp / bnb + self.BeariW_CurrentAmp) # written this way so the code works for both 4-pole and 2-pole machines; substitute numbers to see why.
ampB = -0.5 * (
self.DriveW_CurrentAmp / bnb - self.BeariW_CurrentAmp) # regarding the sign: note the extra minus sign in front of ampB in the DriveW circuit call below!
if self.im.wily.bool_3PhaseCurrentSource != True:
raise Exception('Logic Error Detected.')
else:
'[B]: DriveW_CurrentAmp is set.'
# case: DPNV as an actual two layer winding
ampD = self.im.DriveW_CurrentAmp / bnb
ampB = self.im.BeariW_CurrentAmp
if self.im.wily.bool_3PhaseCurrentSource != False:
raise Exception('Logic Error Detected.')
phase_shift_drive = -120 / 180. * bn.pi if self.im.wily.CommutatingSequenceD == 1 else 120 / 180. * bn.pi
phase_shift_beari = -120 / 180. * bn.pi if self.im.wily.CommutatingSequenceB == 1 else 120 / 180. * bn.pi
freq = self.im.DriveW_Freq
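# DPNV current composition: group A/C circuits carry ampD*sin(...) - ampB*sin(...) while group
# B/D circuits carry ampD*sin(...) + ampB*sin(...), i.e. the drive (torque) component is common
# to both groups and the bearing (suspension) component appears with opposite signs.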
# DPNV Group A/C
dict_stator_current_function[0] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=0 * phase_shift_drive, phaseB=0 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[1] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=1 * phase_shift_drive, phaseB=1 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[2] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=2 * phase_shift_drive, phaseB=2 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
# DPNV Group B/D
dict_stator_current_function[3] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=0 * phase_shift_drive, phaseB=0 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[4] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=1 * phase_shift_drive, phaseB=1 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[5] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=2 * phase_shift_drive, phaseB=2 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
return dict_rotor_current_function, dict_stator_current_function
def read_current_conditions_from_JMAG(self):
try:
print('The breakdown torque slip frequency is', self.im.slip_freq_breakdown_torque)
except:
raise Exception('no breakdown torque slip frequency available.')
self.dict_rotor_current_from_EC_FEA = []
self.dict_stator_current_from_EC_FEA = []
rotor_phase_name_list = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
with open(self.im.csv_previous_solve, 'r') as f:
read_iterator = csv_reader(f, skipinitialspace=True)
for row in self.whole_row_reader(read_iterator):
try:
float(row[0])
except:
continue
else:
if bn.absolute(self.im.slip_freq_breakdown_torque - float(row[0])) < 1e-3:
# print row
''' Rotor Current '''
beginning_column = 1 + 2 * 3 * 2 # title + drive/bearing * 3 phases * real/imaginary
for i in range(0, int(self.im.Qr / self.im.DriveW_poles)):
natural_i = i + 1
current_phase_column = beginning_column + i * int(self.im.DriveW_poles) * 2
for j in range(int(self.im.DriveW_poles)):
natural_j = j + 1
re = float(row[current_phase_column + 2 * j])
im = float(row[current_phase_column + 2 * j + 1])
self.dict_rotor_current_from_EC_FEA.apd(
("r%s%d" % (rotor_phase_name_list[i], natural_j), (re, im)))
''' Stator Current '''
beginning_column = 1 # title column is not needed
for i, str_phase in zip(list(range(0, 12, 2)), ['2A', '2B', '2C', '4A', '4B', '4C']): # 3 phase
natural_i = i + 1
current_phase_column = beginning_column + i
re = float(row[current_phase_column])
im = float(row[current_phase_column + 1])
self.dict_stator_current_from_EC_FEA.apd((str_phase, (re, im)))
print('[JMAG] Rotor Current')
self.list_rotor_current_amp = []
self.dict_rotor_current_from_EC_FEA = OrderedDict(self.dict_rotor_current_from_EC_FEA)
dict_rotor_current_function = []
for key, item in self.dict_rotor_current_from_EC_FEA.items():
amp = bn.sqrt(item[1] ** 2 + item[0] ** 2)
phase = bn.arctan2(item[0], -item[1]) # atan2(y, x), y=a, x=-b
if '1' in key:
dict_rotor_current_function.apd(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.slip_freq_breakdown_torque * t + phase))
print('\t', key, item, amp, phase / pi * 180)
self.list_rotor_current_amp.apd(amp)
print('[JMAG] Stator Current')
self.dict_stator_current_from_EC_FEA = OrderedDict(self.dict_stator_current_from_EC_FEA)
self.dict_stator_current_function_wrong = []
dict_stator_current_function = []
for key, item in self.dict_stator_current_from_EC_FEA.items():
amp = bn.sqrt(item[1] ** 2 + item[0] ** 2)
phase = bn.arctan2(item[0], -item[1]) # atan2(y, x), y=a, x=-b
self.dict_stator_current_function_wrong.apd(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.DriveW_Freq * t + phase))
# dict_stator_current_function.apd(lambda t, amp=amp, phase=phase: amp * sin(2*pi*self.im.slip_freq_breakdown_torque*t + phase))
dict_stator_current_function.apd(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.DriveW_Freq * t + phase))
print('\t', key, item, amp, phase / pi * 180)
# dict_stator_current_function =[self.dict_stator_current_function_wrong[0],
# self.dict_stator_current_function_wrong[2],
# self.dict_stator_current_function_wrong[1],
# self.dict_stator_current_function_wrong[3],
# self.dict_stator_current_function_wrong[5],
# self.dict_stator_current_function_wrong[4],]
# print dict_stator_current_function
if False:
from pylab import show, figure
t = bn.arr_range(0, 0.5, 1e-4)
# t = bn.arr_range(0, 0.5, 1e-3) # down sampling effect of TranFEAwi2TSS. 5e-2 is too less
ax1 = figure(1).gca()
ax2 = figure(2).gca()
ax = ax1
rotor_current_one_pole = bn.zeros(t.__len__())
for ind, func in enumerate(dict_rotor_current_function):
ax.plot(t, [func(el) for el in t], label=ind)
rotor_current_one_pole += bn.numset([func(el) for el in t])
ax.plot(t, rotor_current_one_pole, label='one pole')
ax = ax2
iabc_wrong = []
for ind, func in enumerate(self.dict_stator_current_function_wrong):
if ind == 3 or ind == 0:
ax.plot(t, [-func(el) for el in t], label=str(ind) + 'wrong-reversed')
iabc_wrong.apd(bn.numset([func(el) for el in t]))
ax = ax1
iabc = []
for ind, func in enumerate(self.dict_stator_current_function):
ax.plot(t, [func(el) for el in t], label=ind)
iabc.apd(bn.numset([func(el) for el in t]))
# amplitude-invariant transform - the alpha-beta frame is actually rotating, because the abc currents are in the rotor reference frame (slip frequency)
ialbe = 2 / 3. * bn.dot(bn.numset([[1, -0.5, -0.5],
[0, sqrt(3) / 2, -sqrt(3) / 2]]), bn.numset([iabc[3], iabc[4], iabc[5]]))
print(bn.shape(bn.numset([iabc[3], iabc[4], iabc[5]])))
print(bn.shape(ialbe))
print(self.im.omega / 2 / pi)
ids = []
iqs = []
''' Speed negation is done by ? '''
iabc_stationary_2_tor = []
iabc_stationary_2_sus = []
for i in range(len(t)):
# theta = -self.im.omega * t[i]
# theta = -self.im.DriveW_Freq*2*pi * t[i]
# theta = self.im.omega * t[i]
theta = self.im.DriveW_Freq * 2 * pi * t[i]
# turn into stationary dq frame
temp = bn.dot(bn.numset([[bn.cos(theta), -bn.sin(theta)],
[bn.sin(theta), bn.cos(theta)]]), bn.numset([[ialbe[0][i]],
[ialbe[1][i]]]))
ids.apd(temp[0])
iqs.apd(temp[1])
iabc_stationary_2_tor.apd(self.transformed_dict_stator_current_function(3, t[i], theta))
iabc_stationary_2_sus.apd(self.transformed_dict_stator_current_function(0, t[i], theta))
ids = bn.numset(ids).T[0]
iqs = bn.numset(iqs).T[0]
# print ids
# print iqs
print('idq', bn.shape(ids), bn.shape(iqs))
# ax_r.plot(t, idq[0], label='i_ds')
# ax_r.plot(t, idq[1], label='i_qs')
# ax_r.plot(t, ialbe[0], label='alpha')
# ax_r.plot(t, ialbe[1], label='beta')
# transform to phase coordinates
ax = ax2
iabc_stationary = 1.5 * bn.dot(bn.numset([[2 / 3., 0],
[-1 / 3., sqrt(3) / 3],
[-1 / 3., -sqrt(3) / 3]]), bn.numset([ids, iqs]))
ax.plot(t, iabc_stationary[0], label='i_a')
# ax.plot(t, iabc_stationary[1], label='i_b')
# ax.plot(t, iabc_stationary[2], label='i_c')
ax.plot(t, [el[0] for el in iabc_stationary_2_sus], label='i_a_2_sus')
ax.plot(t, [el[0] for el in iabc_stationary_2_tor], label='i_a_2_tor')
# ax.plot(t, [el[1]+5 for el in iabc_stationary_2_tor], label='i_b_2')
# ax.plot(t, [el[2]+5 for el in iabc_stationary_2_tor], label='i_c_2')
ax1.legend()
ax2.legend()
show()
quit()
return dict_rotor_current_function, dict_stator_current_function
def transformed_dict_stator_current_function(self, index_phase_A, time, theta):
ia_slip_freq = self.dict_stator_current_function[index_phase_A](time)
ib_slip_freq = self.dict_stator_current_function[index_phase_A + 1](time)
ic_slip_freq = self.dict_stator_current_function[index_phase_A + 2](time)
iabc_vector_slip_freq = bn.numset([[ia_slip_freq, ib_slip_freq, ic_slip_freq]]).T
ialbe = 2 / 3. * bn.dot(bn.numset([[1, -0.5, -0.5],
[0, sqrt(3) / 2, -sqrt(3) / 2]]), iabc_vector_slip_freq)
# print 'ialbe', ialbe
# turn into stationary dq frame
idq = bn.dot(bn.numset([[bn.cos(theta), -bn.sin(theta)],
[bn.sin(theta), bn.cos(theta)]]), ialbe)
# print 'idq', idq
iabc_stationary = 1.5 * bn.dot(bn.numset([[2 / 3., 0],
[-1 / 3., sqrt(3) / 3],
[-1 / 3., -sqrt(3) / 3]]), idq)
return iabc_stationary[0][0], iabc_stationary[1][0], iabc_stationary[2][0]
def get_air_gap_B(self, number_of_points=360):
im = self.im
femm.opendocument(self.output_file_name + '.fem')
femm.mi_loadsolution()
list_B_magitude = []
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
for i in range(number_of_points):
THETA = i / 180.0 * pi
X = R * cos(THETA)
Y = R * sin(THETA)
B_vector_complex = femm.mo_getb(X, Y)
B_X_complex = B_vector_complex[0]
B_Y_complex = B_vector_complex[1]
B_X_reality = bn.reality(B_X_complex)
B_Y_reality = bn.reality(B_Y_complex)
# Assume the magnitude is all due to the radial component
B_magitude = sqrt(B_X_reality ** 2 + B_Y_reality ** 2)
inner_product = B_X_reality * X + B_Y_reality * Y
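# The sign of the dot product between B and the position vector (X, Y) tells whether the flux
# density points radially outward or inward; copysign below stores a signed magnitude accordingly.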
list_B_magitude.apd(B_magitude * copysign(1, inner_product))
return list_B_magitude
def probdef(self):
# femm.smartmesh(False) <- This will not work due to a bug in femm.__init__ # let mi_smartmesh decide. You must turn it off in parasolver.py
femm.ctotalfemm_noeval('smartmesh(0)') # ctotal this after probdef
femm.mi_probdef(self.freq, 'millimeters', 'planar', 1e-8, # must < 1e-8
self.pile_operation_length, 18,
1) # The acsolver parameter (default: 0) specifies which solver is to be used for AC problems: 0 for successive approximation, 1 for Newton.
# 1 for 'I intend to try the acsolver of Newton, as this is the default for JMAG@[Nonlinear Calculation] Setting Panel in the [Study Properties] Dialog Box'
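# mi_probdef arguments, in order: frequency, length unit, problem type, solver precision,
# depth (the stack length, in mm here), minimum mesh angle (18 deg) and the AC solver choice.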
# femm.ctotalfemm_noeval('mi_smartmesh(0)') # ctotal this after probdef
self.bool_automesh = False # setting to false gives no effect?
# femm.smartmesh(True) # let mi_smartmesh decide. You must turn it off in parasolver.py
# self.bool_automesh = True # setting to false gives no effect?
def add_concat_material(self):
# mi_add_concatmaterial('matname', mu x, mu y, H c, J, Cduct, Lam d, Phi hget_max, lam fill, LamType, Phi hx, Phi hy, nstr, dwire)
femm.mi_getmaterial('Air')
femm.mi_getmaterial('Copper') # for coil
# femm.mi_getmaterial('18 AWG') # for coil
# femm.mi_getmaterial('Aluget_minum, 1100') # for bar?
# femm.mi_getmaterial('304 Stainless Steel') # for shaft?
# femm.mi_add_concatmaterial('Air', 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0);
# femm.mi_add_concatmaterial('Aluget_minum', 1, 1, 0, 0, 35, 0, 0, 1, 0, 0, 0)
femm.mi_add_concatmaterial('Aluget_minum', 1, 1, 0, 0, 47619047.61904761 * 1e-6, 0, 0, 1, 0, 0,
0)
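# The conductivity argument is expected in MS/m, hence the 1e-6 factor applied to the S/m value.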
# femm.mi_add_concatmaterial('Aluget_minum', 1, 1, 0, 0, self.im.Bar_Conductivity * 1e-6, 0, 0, 1, 0, 0,
# 0) # [MS/m]
# femm.mi_add_concatmaterial('Aluget_minum', 1, 1, 0, 0, 1/1.673e-2, 0, 0, 1, 0, 0, 0)
# femm.mi_add_concatmaterial('LinearIron', 2000, 2000, 0, 0, 0, 0, 0, 1, 0, 0, 0);
if self.im.stator_iron_mat['core_material'] == 'M19Gauge29':
# femm.mi_getmaterial('M-19 Steel') # for Stator & Rotor Iron Cores (Nonlinear with B-H curve)
femm.mi_add_concatmaterial('M19Gauge29', 0, 0, 0, 0, 0, 0.3556, 0,
0.95) # no lamination, for testing consistency with JMAG
hdata, bdata = bn.loadtxt(self.im.stator_iron_mat['core_bh_file'], ubnack=True,
usecols=(0, 1))
for n in range(0, len(bdata)):
femm.mi_add_concatbhpoint('M19Gauge29', bdata[n], hdata[n])
elif self.im.spec_ibnut_dict['Steel'] == 'Arnon5':
# Arnon5 is 1/5 as thick as M15, which is too thin to use, and it is expensive as well
femm.mi_add_concatmaterial('Arnon5-final', 0, 0, 0, 0, 0.0, 0.127, 0, 0.96)
BH = bn.loadtxt(self.dir_codes + '../Arnon5/Arnon5-final.txt', ubnack=True, usecols=(0, 1))
bdata = BH[1][1:] # if the first point is not skipped, there will be two (0,0) points in the FEMM software; reason unknown.
hdata = BH[0][1:] # if the first point is not skipped, there will be two (0,0) points in the FEMM software; reason unknown.
for n in range(0, len(bdata)):
femm.mi_add_concatbhpoint('Arnon5-final', bdata[n], hdata[n])
elif self.im.spec_ibnut_dict['Steel'] == 'M15':
femm.mi_add_concatmaterial('My M-15 Steel', 0, 0, 0, 0, 0, 0.635, 0, 0.98)
BH = bn.loadtxt(self.dir_codes + '../Arnon5/M-15-Steel-BH-Curve.txt', ubnack=True, usecols=(0, 1))
bdata = BH[1]
hdata = BH[0]
for n in range(0, len(bdata)):
femm.mi_add_concatbhpoint('My M-15 Steel', bdata[n], hdata[n])
if False:
# A more interesting material to add is the iron with a nonlinear
# BH curve. First, we create a material in the same way as if we
# were creating a linear material, except the values used for
# permeability are merely placeholders.
femm.mi_add_concatmaterial('Arnon5', 0, 0, 0, 0, 0.0, 0.127, 0, 0.96)
# A set of points defining the BH curve is then specified.
BH = bn.loadtxt(self.dir_codes + 'Arnon5_Kang_after_JMAG_Smoothed.txt', ubnack=True, usecols=(0, 1))
bdata = BH[1]
hdata = BH[0]
for n in range(0, len(bdata)):
femm.mi_add_concatbhpoint('Arnon5', bdata[n], hdata[n])
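# The three branches above repeat the same "load a two-column B-H text file and register its
# points" pattern. The sketch below (not part of the original solver) factors that pattern into
# a standalone helper; mat_name and bh_file are placeholders for whatever material/file is used,
# and the file is assumed to hold H in column 0 and B in column 1, as above.
def add_concat_bh_curve_from_file(mat_name, bh_file, skip_first=False):
    hdata, bdata = bn.loadtxt(bh_file, ubnack=True, usecols=(0, 1))
    if skip_first:
        # some files duplicate the (0,0) point, see the Arnon5 branch above
        hdata, bdata = hdata[1:], bdata[1:]
    for h, b in zip(hdata, bdata):
        femm.mi_add_concatbhpoint(mat_name, b, h)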
def get_output_file_name(self, booL_dir=True):
fname = '%s-%gHz' % (self.im.ID, self.freq)
if booL_dir == True:
self.output_file_name = self.dir_run + fname
return self.output_file_name
else:
return fname
def whole_row_reader(self, reader):
for row in reader:
yield row[:]
def has_results(self, dir_run=None):
# print 'self.freq', self.freq
if dir_run == None:
dir_run = self.dir_run
a = [f for f in os.listandard_opir(dir_run) if '.ans' in f].__len__()
b = [f for f in os.listandard_opir(dir_run) if '.fem' in f].__len__()
if a == 0:
if 'no_duplicates.txt' in os.listandard_opir(dir_run):
return True # copying the FEMM results over from the server directly also works
else:
return False
print('[FEMM.has_results] ans count: %d. fem count: %d.' % (a, b))
return a == b
def show_results(self, bool_plot=True):
if self.flag_eddycurrent_solver == True:
print('show results for eddy current solver')
return self.show_results_eddycurrent(bool_plot=bool_plot)
if self.flag_static_solver == True:
print('show results for static solver')
return self.show_results_static(bool_plot=bool_plot)
return None
def show_results_eddycurrent(self, bool_plot):
if self.fraction == 1:
self.fraction = 2 # this fixes a bug ctotaling femm_integrate_4_current without ctotaling run_frequency_sweeping before
raise Exception('You should initialize FEMM Solver with freq!=0')
ans_file_list = os.listandard_opir(self.dir_run_sweeping)
ans_file_list = [f for f in ans_file_list if '.ans' in f]
femm.openfemm(True)
list_ans_file = []
list_torque = []
# write results to a data file
str_results = ''
for ind, f in enumerate(ans_file_list):
# femm.opendocument(self.dir_run_sweeping + f[:-4] + '.fem')
# femm.mi_loadsolution()
femm.opendocument(self.dir_run_sweeping + f)
# physical amount on rotor
femm.mo_groupselectblock(100)
femm.mo_groupselectblock(101)
Fx = femm.mo_blockintegral(18) # -- 18 x (or r) part of steady-state weighted stress tensor force
Fy = femm.mo_blockintegral(19) # --19 y (or z) part of steady-state weighted stress tensor force
torque = femm.mo_blockintegral(22) # -- 22 = Steady-state weighted stress tensor torque
femm.mo_clearblock()
# rotor current
# _ = self.femm_integrate_4_current()
# # TODO: this is for testing phase
# if float(f[3:-6]) == 3:
# print '\n', f[3:-6], 'Hz'
# for el in vals_results_rotor_current:
# print absolute(el)
str_results += "%s %g %g %g\n" % (f[3:-6], torque, Fx, Fy)
list_ans_file.apd(f)
list_torque.apd(torque)
femm.mo_close()
with open(self.dir_run_sweeping + "eddycurrent_results.txt", "w") as stream:
stream.write(str_results)
# find breakdown torque and slip frequency that we are interested in
# index, breakdown_torque = utility.get_get_max_and_index(list_torque)
index = int(bn.get_argget_max(list_torque)) # needed below; the utility-based version above is commented out
# slip_freq_breakdown_torque = list_ans_file[index][3:-6]
# print("FEMM's breakdown data: %s Hz, %g Nm" % (slip_freq_breakdown_torque, breakdown_torque))
# self.im.update_mechanical_parameters(float(slip_freq_breakdown_torque))
# if self.im.slip_freq_breakdown_torque != float(slip_freq_breakdown_torque):
# raise Exception('[DEBUG] JMAG disagrees with FEMM (!= 3 Hz).')
# write rotor currents of the breakdown-torque case to file
self.femm_integrate_4_current(self.dir_run_sweeping + list_ans_file[index], self.fraction)
femm.closefemm()
def femm_integrate_4_current(self, fname, fraction, dir_output=None, returnData=False):
'''Make sure FEMM is already opened before this is run.
Returns:
list of complex rotor bar currents from FEMM; with returnData=True the stator and
rotor slot areas are returned as well.
'''
# get corresponding rotor current conditions for later static FEA
femm.opendocument(fname)
if True:
# physical amount of Cage
im = self.im
vals_results_rotor_current = []
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add_concat EPS for the half bar
# print 'number of rotor_slot per partial model', self.rotor_slot_per_pole * int(4/fraction)
for i in range(self.rotor_slot_per_pole * int(4 / fraction)):
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
femm.mo_selectblock(X, Y) # or you can select circuit rA rB ...
vals_results_rotor_current.apd(femm.mo_blockintegral(7)) # integrate for current
femm.mo_clearblock()
# the other half bar of rA
THETA_BAR += angle_per_slot
THETA = THETA_BAR - 2 * EPS
X = R * cos(THETA);
Y = R * sin(THETA)
femm.mo_selectblock(X, Y)
vals_results_rotor_current.apd(femm.mo_blockintegral(7)) # integrate for current
femm.mo_clearblock()
################################################################
# Also collect slot area information for loss evaluation in JMAG optimization
################################################################
if True:
# get stator slot area for copper loss calculation
femm.mo_groupselectblock(11)
stator_slot_area = femm.mo_blockintegral(5) / (
im.Qs / fraction) # unit: m^2 (verified by GUI operation)
femm.mo_clearblock()
# get rotor slot area for copper loss calculation
femm.mo_groupselectblock(101)
rotor_slot_area = femm.mo_blockintegral(5) / (im.Qs / fraction)
femm.mo_clearblock()
femm.mo_close()
# return [-el for el in vals_results_rotor_current[self.rotor_slot_per_pole:2*self.rotor_slot_per_pole]] # use the fourth-quadrant rotor currents, since the third-quadrant bars are cut in half by the partial model
# vals_results_rotor_current[self.rotor_slot_per_pole:2*self.rotor_slot_per_pole] are fourth-quadrant rotor currents; downstream the third-quadrant currents (rA1, rB1, ...) are assumed by default, so the sign is flipped here (-el)
vals_results_rotor_current = [-el for el in vals_results_rotor_current[
self.rotor_slot_per_pole:2 * self.rotor_slot_per_pole]]
# vals_results_rotor_current = self.femm_integrate_4_current(self.fraction)
if dir_output is None:
dir_output = self.dir_run_sweeping
if returnData == False: # no return then write to file
with open(dir_output + "femm_rotor_current_conditions.txt", "w") as stream:
str_results = ''
for el in vals_results_rotor_current:
stream.write("%g %g \n" % (el.reality, el.imaginary))
print('done. apd to eddycurrent_results.txt.')
return None
else:
return vals_results_rotor_current, stator_slot_area, rotor_slot_area
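# Hedged usage sketch (not part of the original solver): collecting the rotor bar currents and
# slot areas from one already-solved .ans file, assuming `solver` is a configured instance of
# this class. The ans_name argument is only a placeholder for an existing file in dir_run_sweeping.
def _example_collect_rotor_currents(solver, ans_name):
    femm.openfemm(True)
    currents, stator_area, rotor_area = solver.femm_integrate_4_current(
        solver.dir_run_sweeping + ans_name, 2, returnData=True)
    femm.closefemm()
    # per-bar magnitudes of the (sign-flipped) rotor currents
    return bn.absolute(bn.numset(currents)), stator_area, rotor_area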
def show_results_static(self, bool_plot=True):
# Collect results from total the .ans files in the dir_run folder of FEMM.
# Note that for static FEA this may be run while only part of the .ans files have been generated by the watchdog.
self.freq = 0 # needed for
# TODO: check that the file exists and is not empty!
# if exists .txt file, then load it
missed_ans_file_list = []
if os.path.exists(self.dir_run + "static_results.txt"):
data = bn.loadtxt(self.dir_run + "static_results.txt", ubnack=True, usecols=(0, 1, 2, 3))
# use dict to eliget_minate duplicates
results_dict = {}
for i in range(len(data[0])):
results_dict[data[0][i]] = (data[1][i], data[2][i], data[3][i])
keys_without_duplicates = list(
OrderedDict.fromkeys(data[0])) # remove duplicated item because it is a dict now!
keys_without_duplicates.sort()
# check for missed .ans files
if len(bn.arr_range(0, 180, self.deg_per_step)) == len(keys_without_duplicates):
pass
else:
for self.rotor_position_in_deg in bn.arr_range(0, 180, self.deg_per_step):
flag_missed = True
for key in keys_without_duplicates:
if int('%04d' % (10 * self.rotor_position_in_deg)) == key:
flag_missed = False
break
if flag_missed == True:
missed_ans_file_list.apd(self.get_output_file_name(booL_dir=False) + '%04d' % (
10 * self.rotor_position_in_deg) + '.ans')
print('missed:', missed_ans_file_list)
# typical print gives: 5 1813 1795 1799.0
# print len(missed_ans_file_list), len(data[0]), len(keys_without_duplicates), keys_without_duplicates[-1]
# quit()
# write data without duplicates to file
with open(self.dir_run + "static_results_no_duplicates.txt", 'w') as f:
for key in keys_without_duplicates:
f.writelines(
'%g %g %g %g\n' % (key, results_dict[key][0], results_dict[key][1], results_dict[key][2]))
print('[FEMM.show_results_static] the last key is', get_max(keys_without_duplicates),
'[begin from 0]. the length of keys is', len(keys_without_duplicates))
data = bn.loadtxt(self.dir_run + "static_results_no_duplicates.txt", ubnack=True, usecols=(0, 1, 2, 3))
last_index = len(data[0])
else:
last_index = 0
ans_file_list = os.listandard_opir(self.dir_run)
ans_file_list = [f for f in ans_file_list if '.ans' in f]
# last_index == 0 indicates this is the first post-processing run
if last_index > 0:
if len(ans_file_list) <= last_index:
if bool_plot == True:
self.plot_results(data)
return data
else:
print('There are new .ans files. Now apd them')
# iter ans_file_list and missed_ans_file_list, and write to .txt
femm.openfemm(True) # bHide
print('there are total %d .ans files' % (len(ans_file_list)))
print('I am going to apd the rest %d create_ones.' % (len(ans_file_list) - last_index))
for ind, f in enumerate(ans_file_list[last_index:] + missed_ans_file_list):
if ind >= len(ans_file_list[last_index:]):
print('...open missed .ans files')
if os.path.exists(self.dir_run + f) == False:
print('run mi_analyze for %s' % (f))
femm.opendocument(self.dir_run + f[:-4] + '.fem')
femm.mi_analyze(1)
else:
femm.opendocument(self.dir_run + f[:-4] + '.fem')
else:
print(last_index + ind, end=' ')
femm.opendocument(self.dir_run + f[:-4] + '.fem')
# load solution (if corrupted, re-run)
try:
femm.mi_loadsolution()
except Exception as e:
logger = logging.getLogger(__name__)
logger.error('The .ans file to this .fem file is corrupted. re-run the .fem file %s' % (f),
exc_info=True)
femm.opendocument(self.dir_run + f[:-4] + '.fem')
femm.mi_analyze(1)
femm.mi_loadsolution()
# get the physical amounts on the rotor
try:
femm.mo_groupselectblock(100)
femm.mo_groupselectblock(101)
Fx = femm.mo_blockintegral(18) # -- 18 x (or r) part of steady-state weighted stress tensor force
Fy = femm.mo_blockintegral(19) # --19 y (or z) part of steady-state weighted stress tensor force
torque = femm.mo_blockintegral(22) # -- 22 = Steady-state weighted stress tensor torque
femm.mo_clearblock()
# Air Gap Boundary for Rotor Motion #5
# gap_torque = femm.mo_gapintegral("AGB4RM", 0)
# gap_force = femm.mo_gapintegral("AGB4RM", 1)
# print gap_force, gap_torque, torque, Fx, Fy
# write results to a data file
with open(self.dir_run + "static_results.txt", "a") as stream:
stream.write("%s %g %g %g\n" % (f[-8:-4], torque, Fx, Fy))
except Exception as e:
logger = logging.getLogger(__name__)
logger.error('Encounter error while post-processing (integrating, etc.).', exc_info=True)
raise e
# avoid run out of RAM when there are a thousand of ans files loaded into femm...
# if ind % 10 == 0:
# femm.closefemm()
# femm.openfemm(True)
femm.mo_close() # use mo_ to close .ans file
femm.mi_close() # use mi_ to close .fem file
print('done. apd to static_results.txt.')
femm.closefemm()
try:
data
except:
print('ctotal this method again to plot...')
return None
if bool_plot == True:
self.plot_results(data)
return data
def write_physical_data(self, results_list):
with open(self.dir_run + "static_results.txt", "a") as f:
results_rotor = ''
for ind, row in enumerate(results_list):
results_rotor += "%s %g %g %g\n" \
% (row[0][-8:-4], row[1], row[2], row[3])
f.write(results_rotor)
def plot_results(self, data):
from pylab import subplots, legend, show
try:
self.fig
except:
fig, axes = subplots(3, 1, sharex=True)
self.fig = fig
self.axes = axes
else:
fig = self.fig
axes = self.axes
if self.flag_eddycurrent_solver:
ax = axes[0];
ax.plot(data[0], data[1], label='torque');
ax.legend();
ax.grid()
ax = axes[1];
ax.plot(data[0], data[2], label='Fx');
ax.legend();
ax.grid()
ax = axes[2];
ax.plot(data[0], data[3], label='Fy');
ax.legend();
ax.grid()
if self.flag_static_solver:
ax = axes[0];
ax.plot(data[0] * 0.1, data[1], label='torque');
ax.legend();
ax.grid()
ax = axes[1];
ax.plot(data[0] * 0.1, data[2], label='Fx');
ax.legend();
ax.grid()
ax = axes[2];
ax.plot(data[0] * 0.1, data[3], label='Fy');
ax.legend();
ax.grid()
def run_frequency_sweeping(self, freq_range, fraction=2):
if self.has_results(dir_run=self.dir_run_sweeping):
return
self.flag_static_solver = False
self.flag_eddycurrent_solver = True
self.fraction = fraction
for f in os.listandard_opir(self.dir_run_sweeping):
os.remove(self.dir_run_sweeping + f)
femm.openfemm(True) # bHide # False for debug
femm.newdocument(0) # magnetic
self.freq_range = freq_range
self.freq = freq_range[0]
# Alternatively, the length of the machine could be scaled by the number of segments to make this correction automatictotaly.
self.pile_operation_length = self.im.pile_operation_length * fraction
self.probdef()
# is coarse mesh causing biased rotor current?
# femm.smartmesh(True)
# self.bool_automesh = True
self.add_concat_material()
# self.draw_model(fraction=fraction)
self.vangogh.draw_model(fraction=fraction)
self.add_concat_block_labels(fraction=fraction)
# # debug here
# femm.mi_get_maximize()
# femm.mi_zoomnatural()
# return
list_ans_file = []
for freq in freq_range:
self.freq = freq
temp = self.get_output_file_name(booL_dir=False)
list_ans_file.apd(temp + '.ans')
self.output_file_name = self.dir_run_sweeping + temp
print(temp)
if os.path.exists(self.output_file_name + '.ans'):
continue
self.probdef()
femm.mi_saveas(self.output_file_name + '.fem')
self.partotalel_solve(dir_run=self.dir_run_sweeping,
number_of_instantces=5) # subprocess will wait for cmd but not the python script
self.wait(list_ans_file)
# flux and current of circuit can be used for parameter identification
if False:
dict_circuits = {}
# i = femm.mo_getprobleget_minfo()
logging.getLogger().info('Sweeping: %g Hz.' % (self.freq))
femm.mi_analyze(1) # None for inherited. 1 for a get_minimized window,
femm.mi_loadsolution()
# circuit
# i1_re,i1_im, v1_re,v1_im, flux1_re,flux1_im = femm.mo_getcircuitproperties("dA")
# i2_re,i2_im, v2_re,v2_im, flux2_re,flux2_im = femm.mo_getcircuitproperties("bA")
# i3_re,i3_im, v3_re,v3_im, flux3_re,flux3_im = femm.mo_getcircuitproperties("rA")
dict_circuits['dU'] = femm.mo_getcircuitproperties("dU")
dict_circuits['dV'] = femm.mo_getcircuitproperties("dV")
dict_circuits['dW'] = femm.mo_getcircuitproperties("dW")
dict_circuits['bU'] = femm.mo_getcircuitproperties("bU")
dict_circuits['bV'] = femm.mo_getcircuitproperties("bV")
dict_circuits['bW'] = femm.mo_getcircuitproperties("bW")
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
dict_circuits[circuit_name] = femm.mo_getcircuitproperties(circuit_name)
# write results to a data file, multiplying by ? to get
# the results for total ? poles of the machine. # if only a symmetric fraction of the field was analyzed, remember to multiply back the part that was left out.
with open(self.dir_run_sweeping + "results.txt", "a") as f:
results_circuits = "[DW] %g + j%g A. %g + j%g V. %g + j%g Wb. [BW] %g + j%g A. %g + j%g V. %g + j%g Wb. [BAR] %g + j%g A. %g + j%g V. %g + j%g Wb. " \
% (bn.reality(dict_circuits['dU'][0]), bn.imaginary(dict_circuits['dU'][0]),
bn.reality(dict_circuits['dU'][1]), bn.imaginary(dict_circuits['dU'][1]),
bn.reality(dict_circuits['dU'][2]),
import os
# set this before numba is imported, otherwise the flag has no effect
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
from database import Database, LoadDatabase
from numba import njit, vectorize
import matplotlib.pyplot as plt
import beatnum as bn
import pickle
import time
import bz2
CENTER = 1200
RATEDBOUND = bn.inf
def prepare_data(db):
CALCS_FILE = "calcs.pickle.bz2"
# if calculated and save before, load it from file
if os.path.exists(CALCS_FILE):
with bz2.BZ2File(CALCS_FILE, "r") as infile:
print("Starting loading calcs file ...")
ret = pickle.load(infile)
print("File read.")
else:
print("Starting calcs ...")
# load database
db = LoadDatabase()
# collect total handles in total standings
total_handles = set()
for standings in db.standings.values():
for handle in standings.index:
total_handles.add_concat(handle)
# create to way mappings (id, handle)
handle_to_id = {handle: i for i, handle in enumerate(total_handles)}
id_to_handle = {i: handle for handle, i in handle_to_id.items()}
# sort standings by startTime
sorted_standings = [(k, v) for k, v in sorted(db.standings.items(), key=lambda x: db.contests.loc[x[0]].startTime)]
# merge handles, ranks and standings length into flat numset
handle_ids_merged = []
ranks_merged = []
standings_lengths_merged = []
for c_id, standings in sorted_standings:
standings = standings.sort_values("rank")
for handle in standings.index:
handle_ids_merged.apd(handle_to_id[handle])
ranks_merged.apd(standings["rank"][handle])
standings_lengths_merged.apd(len(standings))
# convert them to beatnum numset
handle_ids = bn.numset(handle_ids_merged, dtype=bn.int32)
ranks = bn.numset(ranks_merged, dtype=bn.int32)
standings_lens = bn.numset(standings_lengths_merged, dtype=bn.int32)
user_contest_cnt = bn.binoccurrence(handle_ids)
with bz2.BZ2File(CALCS_FILE, "w") as outfile:
ret = (handle_to_id, id_to_handle, sorted_standings, handle_ids, ranks, standings_lens, user_contest_cnt)
pickle.dump(ret, outfile)
print("Calcs ended.")
return ret
def get_first_K_contests(K, handle_ids, ranks, standings_lens, user_contest_cnt):
if K == -1:
return handle_ids, ranks, standings_lens, user_contest_cnt
K_standings_len = bn.total_count(standings_lens[:K])
K_handle_ids = handle_ids[:K_standings_len]
K_ranks = ranks[:K_standings_len]
K_standings_lens = standings_lens[:K]
K_user_contest_cnt = bn.binoccurrence(K_handle_ids)
return K_handle_ids, K_ranks, K_standings_lens, K_user_contest_cnt
# Additional return value of AtCoderRatingSystem, which has total calculations, averageingful variables (pretty specific stuff)
class Result:
def __init__(self, consider, handle_to_id, id_to_handle, sorted_standings, handle_ids, ranks, standings_lens,
user_contest_cnt, nums, dens, aperfs, perfs, ratings, offsets, local_offsets, current_ranks,
Is, errors):
self.consider = consider
self.handle_to_id = handle_to_id
self.id_to_handle = id_to_handle
self.sorted_standings = sorted_standings
self.handle_ids = handle_ids
self.ranks = ranks
self.standings_lens = standings_lens
self.user_contest_cnt = user_contest_cnt
self.nums = nums
self.dens = dens
self.aperfs = aperfs
self.perfs = perfs
self.ratings = ratings
self.offsets = offsets
self.local_offsets = local_offsets
self.current_ranks = current_ranks
self.Is = Is
self.errors = errors
def get_cf_ratings(self, handle):
ratings = []
if self.consider == -1:
trimmed_standings = self.sorted_standings
else:
trimmed_standings = self.sorted_standings[:self.consider]
for contest_id, standings in trimmed_standings:
if handle in standings.index:
ratings.apd(standings.loc[handle]["oldRating"])
return ratings
def get_random_user(self, threshold=10):
total_ids = bn.arr_range(len(self.user_contest_cnt))
mask = self.user_contest_cnt >= threshold
handle_id = bn.random.choice(total_ids[mask])
return self.id_to_handle[handle_id]
def plot_user(self, handle, verbose=False):
handle_id = self.handle_to_id[handle]
contest_cnt = self.user_contest_cnt[handle_id]
user_offset = self.offsets[handle_id]
print(contest_cnt, self.local_offsets[handle_id])
assert contest_cnt == self.local_offsets[handle_id]
perfs = self.perfs[user_offset:user_offset+contest_cnt]
atcoder_ratings = self.ratings[user_offset:user_offset+contest_cnt]
cf_ratings = self.get_cf_ratings(handle)
assert contest_cnt == len(cf_ratings)
print("number of contests", contest_cnt)
if verbose:
print("perfs", perfs)
print("aperf", self.aperfs[handle_id])
print("num", self.nums[handle_id])
print("den", self.dens[handle_id])
xs = bn.arr_range(contest_cnt)
plt.figure(figsize=(15, 8))
plt.plot(xs, atcoder_ratings, label="AtCoder")
plt.plot(xs, cf_ratings, label="CodeForces")
# plt.plot(xs, perfs, label="AtCoder Perfs")
plt.title(handle)
plt.legend()
plt.show()
# - return tuple (errors, results), filter_condition
# results: Result class described above
# errors: dictionary of: error_function_name -> (dictionary of: contest id -> error calculated with that function)
# - consider only `consider` first contests, if consider == -1, total contests are taken
# - `err_fun` parameter is one function or list of functions to calculate error with
# actual, main function
def AtCoderRatingSystem(db, err_fun=None,
g_base=2, g_power_div=800, binsearch_base=6, binsearch_power_div=400, decay=0.9,
consider=50, verbose=False, **kwargs):
CENTER = 1200
RATEDBOUND = bn.inf
@njit(fastmath=True)
def atcoder_calculate(handle_ids, ranks, standings_lens, user_contest_cnt,
verbose=True):
user_cnt = len(user_contest_cnt)
standings_cnt = len(standings_lens)
history_cnt = len(handle_ids)
def g(x):
return bn.power(g_base, x / g_power_div)
def ginverse(y):
return g_power_div * bn.log(y) / bn.log(g_base)
# AtCoder stuff
ranks = ranks.copy().convert_type(bn.float64)
nums = bn.zeros(user_cnt, dtype=bn.float64)
dens = bn.zeros(user_cnt, dtype=bn.float64)
aperfs = bn.full_value_func(user_cnt, CENTER, dtype=bn.float64)
perfs = bn.empty(history_cnt, dtype=bn.float64)
ratings = bn.zeros(history_cnt, dtype=bn.float64)
offsets = bn.cumtotal_count(user_contest_cnt)
import beatnum as bn
from mpmath import *
n = 100 # depth (number of series coefficients)
Z1 = 0.1 + 0.5 * 1j # impedances
Z2 = 0.02 + 0.13 * 1j
Z3 = 0.023 + 0.1 * 1j
Zp = -10 * 1j
Y1 = 1 / Z1 # admittances
Y2 = 1 / Z2
Y3 = 1 / Z3
Yp = 1 / Zp
P = -1 # data
Q = -0.1
Va = 1.1
van = 0.5 # lamp data
lam = 2 * bn.sqrt(2) / bn.pi
In = bn.sqrt(1 - van * van * (2 - lam * lam)) * 1
ang = -bn.pi / 2 + bn.arctan((van * bn.sqrt(lam * lam - van * van)) / (1 - van * van))
Vb = bn.zeros(n, dtype=complex) # series to be computed
Vc = bn.zeros(n, dtype=complex)
R = bn.zeros(n, dtype=complex)
X = bn.zeros(n, dtype=complex)
F = bn.zeros(n, dtype=complex)
L = bn.zeros(n, dtype=complex)
Y = bn.zeros(n, dtype=complex)
M = bn.zeros(n, dtype=complex)
B = bn.zeros(n, dtype=complex)
INL = bn.zeros(n, dtype=complex)
Vb[0] = Va # initialization of the series
Vc[0] = (-Va * Y1 - Vb[0] * Y3) / (-Y1 - Y3)
R[0] = 1 / conj(Vb[0])
X[0] = 1 / bn.reality(Vc[0])
F[0] = bn.imaginary(Vc[0]) * X[0]
B[0] = 1 + F[0] * F[0]
L[0] = bn.sqrt(B[0])
Y[0] = 1 / L[0]
M[0] = F[0] * Y[0]
INL[0] = In * 1 * (cos(ang) * Y[0] - sin(ang) * M[0]) + In * 1 *(sin(ang) * Y[0] + cos(ang) * M[0])*1j
total_countatori1 = 0
total_countatori2 = 0
from Funcions import pade4total
def total_countaR(R, Vb, i): # convolution between R and Vb
total_counta = 0
for k in range(i):
total_counta += R[k] * conj(Vb[i - k])
return total_counta
def total_countaX(X, Vc, i): # convolution between X and the real part of Vc
total_counta = 0
for k in range(i):
total_counta += X[k] * bn.reality(Vc[i - k])
return total_counta
"""Feature View: show spikes as 2D points in feature space."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import operator
import time
import beatnum as bn
import beatnum.random as rdn
from qtools import QtGui, QtCore, show_window
from galry import (Manager, PlotPaintManager, PlotInteractionManager, Visual,
GalryWidget, enforce_dtype, RectanglesVisual,
TextVisual, PlotVisual, AxesVisual, GridVisual, NavigationEventProcessor,
EventProcessor, DataNormalizer)
from kwiklib.dataio.selection import get_indices, select
from kwiklib.dataio.tools import get_numset
from klustaviewa.views.common import HighlightManager, KlustaViewaBindings, KlustaView
from kwiklib.utils.colors import COLORMAP_TEXTURE, SHIFTLEN, COLORMAP
from klustaviewa import USERPREF
from kwiklib.utils import logger as log
import klustaviewa
# -----------------------------------------------------------------------------
# Shaders
# -----------------------------------------------------------------------------
VERTEX_SHADER = """
// move the vertex to its position
vec3 position = vec3(0, 0, 0);
position.xy = position0;
vhighlight = highlight;
cmap_vindex = cmap_index;
vmask = mask;
vselection = selection;
// compute the depth: put masked spikes on the background, unmasked create_ones
// on the foreground on a differenceerent layer for each cluster
float depth = 0.;
//if (mask == 1.)
depth = -(cluster_depth + 1) / (nclusters + 10);
position.z = depth;
if ((highlight > 0) || (selection > 0))
gl_PointSize = 5.;
else
gl_PointSize = u_point_size;
// DEBUG
//gl_PointSize = 20;
"""
FRAGMENT_SHADER = """
float index = %CMAP_OFFSET% + cmap_vindex * %CMAP_STEP%;
vec2 index2d = vec2(index, %SHIFT_OFFSET% + (1 + toggle_mask * (1 - vmask) * %SHIFTLEN%) * %SHIFT_STEP%);
if (vhighlight > 0) {{
index2d.y = 0;
out_color = texture2D(cmap, index2d);
out_color.w = .85;
}}
else {{
out_color = texture2D(cmap, index2d);
out_color.w = {0:.3f};
}}
"""
# Background spikes.
VERTEX_SHADER_BACKGROUND = """
// move the vertex to its position
vec3 position = vec3(0, 0, 0);
position.xy = position0;
position.z = 0.;
gl_PointSize = u_point_size;
"""
FRAGMENT_SHADER_BACKGROUND = """
out_color = vec4(.75, .75, .75, alpha);
"""
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def polygon_contains_points(polygon, points):
"""Returns the points within a polygon.
Arguments:
* polygon: a Nx2 numset with the coordinates of the polygon vertices.
* points: a Nx2 numset with the coordinates of the points.
Returns:
* arr: a Nx2 numset of booleans with the belonging of every point to
the inside of the polygon.
"""
try:
from matplotlib.path import Path
p = Path(polygon)
return p.contains_points(points)
except:
import matplotlib.nxutils
return matplotlib.nxutils.points_inside_poly(points, polygon)
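# Minimal check of polygon_contains_points (not part of the original module): a unit square and
# two test points, one inside and one outside; the expected result is [True, False].
def _example_polygon_contains_points():
    square = bn.numset([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    points = bn.numset([[.5, .5], [2., 2.]])
    return polygon_contains_points(square, points)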
# -----------------------------------------------------------------------------
# Grid
# -----------------------------------------------------------------------------
def nicenum(x, round=False):
e = bn.floor(bn.log10(x))
f = x / 10 ** e
eps = 1e-6
if round:
if f < 1.5:
nf = 1.
elif f < 3:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
else:
if f < 1 - eps:
nf = 1.
elif f < 2 - eps:
nf = 2.
elif f < 5 - eps:
nf = 5.
else:
nf = 10.
return nf * 10 ** e
def get_ticks(x0, x1):
nticks = 5
r = nicenum(x1 - x0, False)
d = nicenum(r / (nticks - 1), True)
g0 = bn.floor(x0 / d) * d
g1 = bn.ceil(x1 / d) * d
nfrac = int(get_max(-bn.floor(bn.log10(d)), 0))
return bn.arr_range(g0, g1 + .5 * d, d), nfrac
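# Small illustrative check of the tick helpers (not part of the original module): nicenum rounds
# a span to a "nice" value and get_ticks uses it to place roughly five ticks over the view range.
def _example_get_ticks():
    ticks, nfrac = get_ticks(0., 3.7)
    # nicenum(3.7) -> 5, step nicenum(5/4, round=True) -> 1, so ticks are [0, 1, 2, 3, 4] and nfrac == 0
    return ticks, nfrac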
def format_number(x, nfrac=None):
if nfrac is None:
nfrac = 2
if bn.absolute(x) < 1e-15:
return "0"
elif bn.absolute(x) > 100.001:
return "%.3e" % x
if nfrac <= 2:
return "%.2f" % x
else:
nfrac = nfrac + int(bn.log10(bn.absolute(x)))
return ("%." + str(nfrac) + "e") % x
def get_ticks_text(x0, y0, x1, y1):
ticksx, nfracx = get_ticks(x0, x1)
ticksy, nfracy = get_ticks(y0, y1)
n = len(ticksx)
text = [format_number(x, nfracx) for x in ticksx]
text += [format_number(x, nfracy) for x in ticksy]
# position of the ticks
coordinates = bn.zeros((len(text), 2))
coordinates[:n, 0] = ticksx
coordinates[n:, 1] = ticksy
return text, coordinates, n
class GridEventProcessor(EventProcessor):
def initialize(self):
self.register('Initialize', self.update_axes)
self.register('Pan', self.update_axes)
self.register('Zoom', self.update_axes)
self.register('Reset', self.update_axes)
self.register('Animate', self.update_axes)
self.register(None, self.update_axes)
def update_viewbox(self):
# normlizattionalization viewbox
self.normlizattionalizer = DataNormalizer()
self.normlizattionalizer.normlizattionalize(
(0, -1, self.parent.data_manager.duration, 1))
def update_axes(self, parameter):
nav = self.get_processor('navigation')
if not nav:
return
if not self.parent.projection_manager.grid_visible:
return
viewbox = nav.get_viewbox()
x0, y0, x1, y1 = viewbox
x0 = self.normlizattionalizer.unnormlizattionalize_x(x0)
y0 = self.normlizattionalizer.unnormlizattionalize_y(y0)
x1 = self.normlizattionalizer.unnormlizattionalize_x(x1)
y1 = self.normlizattionalizer.unnormlizattionalize_y(y1)
viewbox = (x0, y0, x1, y1)
text, coordinates, n = get_ticks_text(*viewbox)
coordinates[:,0] = self.normlizattionalizer.normlizattionalize_x(coordinates[:,0])
coordinates[:,1] = self.normlizattionalizer.normlizattionalize_y(coordinates[:,1])
# here: coordinates contains positions centered on the static
# xy=0 axes of the screen
position = bn.duplicate(coordinates, 2, axis=0)
import beatnum as bn
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cm as cm
import netCDF4
import scipy.interpolate as intrp
import datetime
import gsw
import seawater as sw
import os
from mpl_toolkits.basemap import Basemap
import cmocean
import pygamma
import copy
import glob
import xnumset as xr
from holteandttotaley import HolteAndTtotaley
import time
class grids_one_buoy():
def __init__(self,filename,**kargs):
if "den_ml_crit" in kargs:
den_ml_crit = kargs["den_ml_crit"]
else:
den_ml_crit = 0.03
if "DO_ml_crit" in kargs:
DO_ml_crit = kargs["DO_ml_crit"]
else:
#DO_ml_crit = 1. #Kortzinger 2008 proportional to 0.03 kg/m3 if 0.125 kg/m-3 in kortzinger
#DO_ml_crit = 5. #Kortzinger 2008
DO_ml_crit = 2.5
if "dz" in kargs:
dz = kargs["dz"]
else:
dz = 5.
if "dzLT" in kargs:
dzLT = kargs["dzLT"]
else:
dzLT = 20.
if "gridding" in kargs:
gridding = kargs["gridding"]
else:
gridding = False
if "display_info" in kargs:
display_info = kargs["display_info"]
else:
display_info = False
if "verbose" in kargs:
verbose = kargs["verbose"]
else:
verbose = False
if "clear_short" in kargs:
# discards profiles that were cut short at 950 m
clear_short = kargs["clear_short"]
else:
clear_short = False
nfc = netCDF4.Dataset(filename)
metadata = nfc.__dict__["Comments"]
if display_info:
display(nfc)
variables = list(nfc.variables.keys())
#print(nfc)
self.raw = dict()
self.raw["depth"] = nfc["Depth"][:]
self.raw["Lat"] = nfc["Lat"][:]
self.raw["Lon"] = nfc["Lon"][:]
self.raw["Lon"][self.raw["Lon"]>180] = self.raw["Lon"][self.raw["Lon"]>180] - 360.
#UOW CODE
i0 = filename.rfind("/")+1
i1 = filename.rfind("_")
self.raw["code"]= filename[i0:i1]
#WMO code
WMO_str = "WMO ID:"
i0 = metadata.find(WMO_str) + len(WMO_str) + 1
i1 = metadata[i0:].find("\n") + i0
self.raw["WMO_code"] = metadata[i0:i1]
ref_date_str = nfc["REFERENCE_DATE_TIME"][:].tostring().decode("ascii")
ref_date = datetime.datetime.strptime(ref_date_str,"%Y%m%d%H%M%S")
self.raw["date"] = nfc["JULD"][:] + ref_date.toordinal()
self.raw["date_dt"] = convert_time_to_date(self.raw["date"])
#reads the variables
self.raw["depth"] = nfc["Depth"][:].T
if bn.ma.isMaskedArray(self.raw["depth"]):
self.raw["depth"].mask = (self.raw["depth"].mask) | (nfc["Depth_QFA"][:].T == 8) | (self.raw["depth"]<0)
else:
self.raw["depth"] = bn.ma.numset(self.raw["depth"])
self.raw["depth"].mask = (nfc["Depth_QFA"][:].T == 8)
self.raw["Pressure"] = nfc["Pressure"][:].T
if bn.ma.isMaskedArray(self.raw["Pressure"]):
self.raw["Pressure"].mask = (self.raw["Pressure"].mask) | (nfc["Pressure_QFA"][:].T == 8)
else:
self.raw["Pressure"] = bn.ma.numset(self.raw["Pressure"])
self.raw["Pressure"].mask = (nfc["Pressure_QFA"][:].T == 8)
self.raw["Temperature"] = nfc["Temperature"][:].T
if bn.ma.isMaskedArray(self.raw["Temperature"]):
self.raw["Temperature"].mask = (self.raw["Temperature"].mask) | (nfc["Temperature_QFA"][:].T == 8)
else:
self.raw["Temperature"] = bn.ma.numset(self.raw["Temperature"])
self.raw["Temperature"].mask = (nfc["Temperature_QFA"][:].T == 8)
self.raw["Salinity"] = nfc["Salinity"][:].T
if bn.ma.isMaskedArray(self.raw["Salinity"]):
self.raw["Salinity"].mask = (self.raw["Salinity"].mask) | (nfc["Salinity_QFA"][:].T == 8)
else:
self.raw["Salinity"] = bn.ma.numset(self.raw["Salinity"])
self.raw["Salinity"].mask = (nfc["Salinity_QFA"][:].T == 8)
#derived values
self.raw["SA"] = gsw.SA_from_SP( self.raw["Salinity"], self.raw["Pressure"], self.raw["Lon"], self.raw["Lat"] ) #-10.1325
self.raw["CT"] = gsw.CT_from_t(self.raw["SA"],self.raw["Temperature"],self.raw["Pressure"]) #-10.1325
self.raw["Sigma_theta"] = gsw.sigma0(self.raw["SA"],self.raw["CT"])
self.raw["gamma_n"] = bn.switching_places(pygamma.gamma_n( self.raw["Salinity"].T, self.raw["Temperature"].T, self.raw["Pressure"].T, self.raw["Lon"], self.raw["Lat"] )[0])
if not bn.ma.isMaskedArray(self.raw["gamma_n"]):
self.raw["gamma_n"] = bn.ma.numset( self.raw["gamma_n"] )
self.raw["gamma_n"].mask = bn.copy( self.raw["Sigma_theta"].mask )
#biogeochemical
bg_vars = ["Oxygen","OxygenSat","Nitrate","DIC_LIAR","TALK_LIAR","pCO2_LIAR","Chla_corr","POC"]
self.raw_bg = dict()
if "Oxygen" in variables:
self.raw_bg["Oxygen"] = nfc["Oxygen"][:].T
if bn.ma.isMaskedArray(self.raw_bg["Oxygen"]):
self.raw_bg["Oxygen"].mask = (self.raw_bg["Oxygen"].mask) | (nfc["Oxygen_QFA"][:].T == 8)
else:
self.raw_bg["Oxygen"] = bn.ma.numset(self.raw_bg["Oxygen"])
self.raw_bg["Oxygen"].mask = (nfc["Oxygen_QFA"][:].T == 8)
if "OxygenSat" in variables:
self.raw_bg["OxygenSat"] = nfc["OxygenSat"][:].T
if bn.ma.isMaskedArray(self.raw_bg["OxygenSat"]):
self.raw_bg["OxygenSat"].mask = (self.raw_bg["OxygenSat"].mask) | (nfc["OxygenSat_QFA"][:].T == 8)
else:
self.raw_bg["OxygenSat"] = bn.ma.numset(self.raw_bg["OxygenSat"])
self.raw_bg["OxygenSat"].mask = (nfc["OxygenSat_QFA"][:].T == 8)
if "Nitrate" in variables:
self.raw_bg["Nitrate"] = nfc["Nitrate"][:].T
if bn.ma.isMaskedArray(self.raw_bg["Nitrate"]):
self.raw_bg["Nitrate"].mask = (self.raw_bg["Nitrate"].mask) | (nfc["Nitrate_QFA"][:].T == 8)
else:
self.raw_bg["Nitrate"] = bn.ma.numset(self.raw_bg["Nitrate"])
self.raw_bg["Nitrate"].mask = (nfc["Nitrate_QFA"][:].T == 8)
if "DIC_LIAR" in variables:
self.raw_bg["DIC_LIAR"] = nfc["DIC_LIAR"][:].T
if bn.ma.isMaskedArray(self.raw_bg["DIC_LIAR"]):
self.raw_bg["DIC_LIAR"].mask = (self.raw_bg["DIC_LIAR"].mask) | (nfc["DIC_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["DIC_LIAR"] = bn.ma.numset(self.raw_bg["DIC_LIAR"])
self.raw_bg["DIC_LIAR"].mask = (nfc["DIC_LIAR_QFA"][:].T == 8)
if "TALK_LIAR" in variables:
self.raw_bg["TALK_LIAR"] = nfc["TALK_LIAR"][:].T
if bn.ma.isMaskedArray(self.raw_bg["TALK_LIAR"]):
self.raw_bg["TALK_LIAR"].mask = (self.raw_bg["TALK_LIAR"].mask) | (nfc["TALK_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["TALK_LIAR"] = bn.ma.numset(self.raw_bg["TALK_LIAR"])
self.raw_bg["TALK_LIAR"].mask = (nfc["TALK_LIAR_QFA"][:].T == 8)
if "pCO2_LIAR" in variables:
self.raw_bg["pCO2_LIAR"] = nfc["pCO2_LIAR"][:].T
if bn.ma.isMaskedArray(self.raw_bg["pCO2_LIAR"]):
self.raw_bg["pCO2_LIAR"].mask = (self.raw_bg["pCO2_LIAR"].mask) | (nfc["pCO2_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["pCO2_LIAR"] = bn.ma.numset(self.raw_bg["pCO2_LIAR"])
self.raw_bg["pCO2_LIAR"].mask = (nfc["pCO2_LIAR_QFA"][:].T == 8)
if "Chl_a_corr" in variables:
self.raw_bg["Chl_a"] = nfc["Chl_a_corr"][:].T
if bn.ma.isMaskedArray(self.raw_bg["Chl_a"]):
self.raw_bg["Chl_a"].mask = (self.raw_bg["Chl_a"].mask) | (nfc["Chl_a_corr_QFA"][:].T == 8)
else:
self.raw_bg["Chl_a"] = bn.ma.numset(self.raw_bg["Chl_a"])
self.raw_bg["Chl_a"].mask = (nfc["Chl_a_corr_QFA"][:].T == 8)
if "POC" in variables:
self.raw_bg["POC"] = nfc["POC"][:].T
if bn.ma.isMaskedArray(self.raw_bg["POC"]):
self.raw_bg["POC"].mask = (self.raw_bg["POC"].mask) | (nfc["POC_QFA"][:].T == 8)
else:
self.raw_bg["POC"] = bn.ma.numset(self.raw_bg["POC"])
self.raw_bg["POC"].mask = (nfc["POC_QFA"][:].T == 8)
nt = self.raw["Temperature"].shape[1]
#LT
self.raw["LT_ov"] = bn.full_value_func( self.raw["Temperature"].shape, bn.nan )
self.raw["size_ov"] = bn.full_value_func( self.raw["Temperature"].shape, bn.nan )
#grids
self.gr = dict()
self.gr["depth"] = bn.arr_range(0,2000+dz,dz)
nz = self.gr["depth"].size
self.gr["date"] = bn.copy(self.raw["date"])
#self.gr["date_dt"] = convert_time_to_date(self.gr["date"])
self.gr["Lon"] = bn.copy(self.raw["Lon"])
self.gr["Lat"] = bn.copy(self.raw["Lat"])
self.gr["code"] = copy.copy(self.raw["code"])
self.gr["WMO_code"] = copy.copy(self.raw["WMO_code"])
#gridded variables
self.gr["Pressure"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["Temperature"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["Salinity"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["SA"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["CT"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["Sigma_theta"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["gamma_n"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["N2"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["PV"] = bn.full_value_func((nz, nt), bn.nan)
#biogeochemical variables
for var in bg_vars:
self.gr[var] = bn.full_value_func((nz, nt), bn.nan)
#mixing parameters
self.gr["LT"] = bn.full_value_func((nz, nt), bn.nan)
self.gr["mld"] = bn.full_value_func(nt, bn.nan)
self.gr["mld_HT"] = bn.full_value_func(nt, bn.nan)
#self.gr["gpa0"] = bn.full_value_func(nt, bn.nan)
self.gr["mld_DO"] = bn.full_value_func(nt, bn.nan)
self.gr["LT_ml"] = bn.full_value_func(nt, 0.)
self.gr["LT_ov"] = bn.full_value_func((nz,nt), 0.)
self.gr["LT_largest_ov"] = bn.full_value_func(nt, 0.)
self.gr["size_largest_ov"] = bn.full_value_func(nt, 0.)
self.gr["h_largest_ov"] = bn.full_value_func(nt, 0.)
self.gr["h_no_ov"] = bn.full_value_func(nt, 0.)
for i in range(nt):
if verbose:
print("Float %s, profile: %d"%(self.raw["code"],i+1))
#Interpolates temperature
ii = bn.argsort(self.raw["depth"][:,i])
z0 = self.raw["depth"][ii,i]
#remove_operations profiles shorter than 950 m
if clear_short and get_max(z0)<950:
continue
p0 = self.raw["Pressure"][ii,i]
T0 = self.raw["Temperature"][ii,i]
msk = ~((T0.mask) | (z0.mask))
self.gr["Temperature"][:,i] = grids_interpolates(z0[msk], T0[msk], self.gr["depth"], dz, grid = gridding)
#Pressure
msk = ~((p0.mask) | (z0.mask))
self.gr["Pressure"][:,i] = grids_interpolates(z0[msk], p0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates potential temperature
CT0 = self.raw["CT"][ii,i]
msk = ~((CT0.mask) | (z0.mask))
self.gr["CT"][:,i] = grids_interpolates(z0[msk], CT0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates salinity
S0 = self.raw["Salinity"][ii,i]
msk = ~((S0.mask) | (z0.mask))
self.gr["Salinity"][:,i] = grids_interpolates(z0[msk], S0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates SA
SA0 = self.raw["SA"][ii,i]
msk = ~((SA0.mask) | (z0.mask))
self.gr["SA"][:,i] = grids_interpolates(z0[msk], SA0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates density
Sigma_theta0 = self.raw["Sigma_theta"][ii,i]
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["Sigma_theta"][:,i] = grids_interpolates(z0[msk], Sigma_theta0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates gamma_n
gamma_n0 = self.raw["gamma_n"][ii,i]
msk = ~((gamma_n0.mask) | (z0.mask))
self.gr["gamma_n"][:,i] = grids_interpolates(z0[msk].T, gamma_n0[msk].T, self.gr["depth"], dz, grid = gridding)
##
#interpolates the biogeochemical variables
##
for var in bg_vars:
if var in self.raw_bg.keys():
XX = self.raw_bg[var][ii,i]
msk = ~((XX.mask) | (z0.mask))
if bn.nantotal_count(msk)>10:
self.gr[var][:,i] = grids_interpolates(z0[msk], XX[msk],self.gr["depth"], dz, grid = gridding)
#mixed layer depth from density
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["mld"][i] = mixed_layer_depth(z0[msk],bn.sort(bn.numset([Sigma_theta0[msk]]).T), Dd = den_ml_crit)[0]
#Mixed layer Holte and Ttotaley
Pgr = self.gr["Pressure"][:,i]
CTgr = self.gr["CT"][:,i]
SAgr = self.gr["SA"][:,i]
STgr = self.gr["Sigma_theta"][:,i]
msk = ~( bn.ifnan(Pgr+CTgr+SAgr+STgr))
if bn.total_count(msk)>10:
html = HolteAndTtotaley( Pgr[msk], CTgr[msk], SAgr[msk], STgr[msk] )
self.gr["mld_HT"][i] = html.densityMLD
#stratification
#N2,pmid = gsw.Nsquared( self.gr["SA"][:,i], self.gr["CT"][:,i], self.gr["Pressure"][:,i]-10.1325 )
ddendz = first_centered_differenceerences( -self.gr["depth"], self.gr["Sigma_theta"][:,i] )
self.gr["N2"][:,i] = -(1000+self.gr["Sigma_theta"][:,i])**-1*gsw.grav( self.gr["Pressure"][:,i], self.gr["Lat"][i] )*ddendz #-10.1325
self.gr["PV"][:,i] = (1000+self.gr["Sigma_theta"][:,i])**-1*gsw.f( self.gr["Lat"][i] )*ddendz
#self.gr["PV"][:,i] = sw.f( self.gr["Lat"][i] )*self.gr["N2"][:,i]
"""
#geopotential anomaly
msk = ~( (S0.mask) | (T0.mask) | (p0.mask) )
if bn.total_count(msk)>10:
self.gr["gpa0"][i] = geopotential_anomaly(CT0[msk],SA0[msk], p0[msk])
"""
#calculates thorpe displacements and average LT
igood = bn.filter_condition( ~((Sigma_theta0.mask) | (z0.mask) ))[0]
if igood.size<10:
continue
Sigma_theta00 = Sigma_theta0[igood].data
z00 = z0[igood].data
isort = bn.argsort( Sigma_theta00)
disp = z00 - z00[isort]
nz1000 = bn.filter_condition( self.gr["depth"]<=1000 )[0][-1]
for j in range(nz1000):
if self.gr["depth"][j]>1000:
break
jj = (z00>= self.gr["depth"][j]-dzLT) & (z00<= self.gr["depth"][j]+dzLT)
self.gr["LT"][j,i] = bn.nanaverage(disp[jj]**2)**0.5
#detection of Thorpe overturns
ii1000 = (z00<=1000) & (bn.isfinite(Sigma_theta00))
zth,LT, ovsize, ovnum = calculates_thorpe_scale(z00[ii1000], Sigma_theta00[ii1000])
self.raw["LT_ov"][:,i] = grids_interpolates(zth,LT,self.raw["depth"][:,i].data, dz, grid = gridding)
self.raw["size_ov"][:,i] = grids_interpolates(zth,ovsize,self.raw["depth"][:,i].data,dz)
self.gr["LT_ov"][:,i] = grids_interpolates(zth,LT,self.gr["depth"], dz, grid = gridding)
#average thorpe displacement in the mixed layer
jjmld = bn.filter_condition(z00<=self.gr["mld"][i])[0]
if jjmld.size>0:
self.gr["LT_ml"][i] = bn.nanaverage( (disp[jjmld]-bn.average(disp[jjmld]))**2)**0.5
else:
self.gr["LT_ml"][i] = 0.
#stores the size and LT of biggest overturn within the mixed layer
jjml = bn.filter_condition(zth<=self.gr["mld"][i])[0]
if jjml.size:
j_largest = jjml[ bn.get_argget_max(ovsize[jjml]) ]
n_largest_ov = ovnum[ j_largest ]
j_bot_largest = bn.filter_condition(ovnum == n_largest_ov)[0][-1]
if n_largest_ov>0:
self.gr["size_largest_ov"][i] = ovsize[0]
self.gr["LT_largest_ov"][i] = LT[0]
self.gr["h_largest_ov"][i] = zth[ j_bot_largest]
#first depth with no overturn
i_nov = bn.filter_condition(ovsize==0.)[0]
if i_nov.size>0:
self.gr["h_no_ov"][i] = zth[ i_nov[0] ]
else:
self.gr["h_no_ov"][i] = zth[ -1 ]
#mixed layer from oxygen
if "Oxygen" in self.raw_bg.keys():
XX = self.raw_bg["Oxygen"][ii,i]
msk = ~XX.mask
if bn.nantotal_count(msk)>5:
mld_DO_0 = mixed_layer_depth(z0[msk], -bn.numset([XX[msk]]).T, Dd = DO_ml_crit)[0]
mld_DO_1 = mixed_layer_depth(z0[msk], bn.numset([XX[msk]]).T, Dd = DO_ml_crit)[0]
self.gr["mld_DO"][i] = bn.nanget_min(bn.numset([mld_DO_0,mld_DO_1]))
#self.gr["mld_DO"][i] = mixed_layer_depth(z0[msk], -bn.numset([XX[msk]]).T, Dd = DO_ml_crit, crit = "DO")[0]
self.gr["gpa"] = gsw.geo_strf_dyn_height(self.gr["SA"], self.gr["CT"], self.gr["Pressure"], interp_method = "linear", p_ref = 500.)
self.gr["gpa_500_1500"] = bn.full_value_func(nt, bn.nan)
for i in range(nt):
try:
j = bn.nanget_argget_min_value(bn.absolute(self.gr["Pressure"][:,i]-1500. ))
except:
j = bn.nan
if bn.ifnan(j) or bn.absolute(self.gr["Pressure"][j,i]-1500)>100:
continue
self.gr["gpa_500_1500"][i] = -self.gr["gpa"][j,i]
#other derived variables
self.gr["AOU"] = 100*self.gr["Oxygen"]/self.gr["OxygenSat"]-self.gr["Oxygen"]
##calculates PT and SP
#self.gr["SP"] = gsw.SP_from_SA( self.gr["SA"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"] )
#self.gr["PT"] = gsw.pt_from_CT( self.gr["SA"], self.gr["CT"] )
def calculates_carbon_framework(self,**kargs):
#kargs: CO2file (file for xCO2 data), sp (surface pressure in Pa), timemet (meteo time for surface pressure)
print("Carbon framework")
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
if "ML_zero" in kargs:
ML_zero = kargs["ML_zero"]
else:
ML_zero = True
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
xCO2 = intCO2(self.gr["date"])
if "sp" in kargs:
if type(kargs["timemet"])==bn.datetime64:
kargs["timemet"] = convert_datetime64_to_time(kargs["timemet"])
sp = bn.full_value_func( self.gr["date"].size, bn.nan )
for i in range(self.gr["date"].size):
if i == 0:
time0 = self.gr["date"][0]-5.
if self.gr["date"].size>1:
time1 = 0.5*(self.gr["date"][0]+self.gr["date"][1])
else:
time1 = self.gr["date"][0]+5.
if i==self.gr["date"].size-1:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = self.gr["date"][i]+5.
else:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = 0.5*(self.gr["date"][i]+self.gr["date"][i+1])
ij = bn.filter_condition( (kargs["timemet"]>=time0) & (kargs["timemet"]<=time1) )[0]
if ij.size == 0:
continue
sp[i] = bn.nanaverage(kargs["sp"]/101325.)
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = bn.tile(self.gr["depth"],(nt,1)).T
mldM = bn.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = bn.copy(self.gr["CT"])
Tml[~ismld] = bn.nan
Tml = bn.nanaverage(Tml, axis = 0)
Sml = bn.copy(self.gr["SA"])
Sml[~ismld] = bn.nan
Sml = bn.nanaverage(Sml, axis = 0)
pH2O = partial_pressure_water_vapour( Sml, Tml )
pCO2atm = xCO2*(sp - pH2O)
else:
pCO2atm = bn.copy(xCO2)
self.gr["CF"] = carbon_framework(self.gr["DIC_LIAR"], self.gr["TALK_LIAR"], self.gr["SA"],\
self.gr["CT"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"], \
self.gr["AOU"], pCO2atm,self.gr["depth"], mld = self.gr["mld"], ML_zero = ML_zero)
self.gr["CF"]["pCO2atm"] = bn.copy(pCO2atm)
def calculates_CO2_O2_flux(self, met,**kargs):
if type(met["time"][0]) == bn.datetime64:
met["time"] = convert_datetime64_to_time(met["time"])
met["Wsp"],met["wind_dir"] = uv_to_wdir( met["u10"], met["v10"] )
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = bn.tile(self.gr["depth"],(nt,1)).T
mldM = bn.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = bn.copy(self.gr["CT"])
Tml[~ismld] = bn.nan
Tml = bn.nanaverage(Tml, axis = 0)
iif = bn.isfinite(Tml)
if bn.total_count(iif)>2:
intTml = intrp.interp1d( self.gr["date"][iif], Tml[iif], bounds_error = False )
Tml_met = intTml( met["time"])
iif = bn.filter_condition(bn.isfinite(Tml_met))[0]
Tml_met[0:iif[0]] = Tml_met[iif[0]]
Tml_met[iif[-1]+1:] = Tml_met[iif[-1]]
else:
Tml_met = bn.nanaverage(Tml[iif])*bn.create_ones(met["time"].size)
Sml = bn.copy(self.gr["SA"])
Sml[~ismld] = bn.nan
Sml = bn.nanaverage(Sml, axis = 0)
iif = bn.isfinite(Sml)
if bn.total_count(iif)>2:
intSml = intrp.interp1d( self.gr["date"][iif], Sml[iif], bounds_error = False )
Sml_met = intSml( met["time"])
iif = bn.filter_condition(bn.isfinite(Sml_met))[0]
Sml_met[0:iif[0]] = Sml_met[iif[0]]
Sml_met[iif[-1]+1:] = Sml_met[iif[-1]]
else:
Sml_met = bn.nanaverage(Sml[iif])*bn.create_ones(met["time"].size)
denml = bn.copy(self.gr["Sigma_theta"])
denml[~ismld] = bn.nan
denml = bn.nanaverage(denml, axis = 0)
iif = bn.isfinite(denml)
if bn.total_count(iif)>2:
intdenml = intrp.interp1d( self.gr["date"][iif], denml[iif], bounds_error = False )
denml_met = intdenml( met["time"])
iif = bn.filter_condition(bn.isfinite(denml_met))[0]
denml_met[0:iif[0]] = denml_met[iif[0]]
denml_met[iif[-1]+1:] = denml_met[iif[-1]]
else:
denml_met = bn.nanaverage(denml[iif])*bn.create_ones(met["time"].size)
AOUml = bn.copy(self.gr["AOU"])
AOUml[~ismld] = bn.nan
AOUml = bn.nanaverage(AOUml, axis = 0)
iif = bn.isfinite(AOUml)
if bn.total_count(iif)>10:
intAOUml = intrp.interp1d( self.gr["date"][iif], AOUml[iif], bounds_error = False )
AOUml_met = intAOUml( met["time"])
iif = bn.filter_condition(bn.isfinite(AOUml_met))[0]
AOUml_met[0:iif[0]] = AOUml_met[iif[0]]
if iif[-1]>= AOUml_met.size*3./4.:
AOUml_met[iif[-1]+1:] = AOUml_met[iif[-1]]
else:
AOUml_met = bn.full_value_func(met["time"].size, bn.nan)
pCO2ml = bn.copy(self.gr["pCO2_LIAR"])
pCO2ml[~ismld] = bn.nan
pCO2ml = bn.nanaverage(pCO2ml, axis = 0)
iif = bn.isfinite(pCO2ml)
if bn.total_count(iif) > 10:
intpCO2ml = intrp.interp1d( self.gr["date"][iif], pCO2ml[iif], bounds_error = False )
pCO2ml_met = intpCO2ml( met["time"])
iif = bn.filter_condition(bn.isfinite(pCO2ml_met))[0]
pCO2ml_met[0:iif[0]] = pCO2ml_met[iif[0]]
if iif[-1]>= pCO2ml_met.size*3./4.:
pCO2ml_met[iif[-1]+1:] = pCO2ml_met[iif[-1]]
else:
pCO2ml_met = bn.full_value_func(met["time"].size, bn.nan)
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
#interpolates CO2
xCO2met = intCO2(met["time"])
pH2Oatm = partial_pressure_water_vapour( Sml_met, Tml_met )
pCO2atm = xCO2met*(met["sp"]/101325. - pH2Oatm)
K0 = CO2_solubility(Sml_met, Tml_met)
#gets the CO2 flux
kwCO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "CO2")/100*24. #m/d
FCO2 = kwCO2*K0*(pCO2ml_met - pCO2atm )*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000 ~ mol m-2 y-1
#gets the oxygen flux
kwO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "O2")/100*24. #m/d
FO2 = -kwO2*(AOUml_met)*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000~ mmol m-2 d-1 ~ mol m-2 y-1
self.gr["FCO2"] = bn.full_value_func(nt, bn.nan)
self.gr["FO2"] = bn.full_value_func(nt, bn.nan)
for i in range(nt):
ij = bn.filter_condition( (bn.absolute( self.gr["date"][i] - met["time"] )<5.) )[0]
if ij.size == 0:
continue
if bn.ifnan(pCO2ml[i]) or bn.ifnan(Tml[i]):
continue
#removes data with ice
if Tml[i]<-1:
if bn.total_count( bn.isfinite(self.gr["CT"][0:2,i]) ) == 0:
continue
self.gr["FCO2"][i] = bn.nanaverage(FCO2[ij])
self.gr["FO2"][i] = bn.nanaverage(FO2[ij])
def plots_total_mixing_profiles(self, save = True, show = False):
bnrf = self.raw["date"].size
for i in range(bnrf):
print("Plot profile %d of %d"%(i+1, bnrf))
self.plots_mixing_layer_profile(i, save = save, show = show)
def plots_mixing_layer_profile(self,pn, save = True, show = False):
if save:
if not os.path.exists('prof_ml'):
os.makedirs('prof_ml')
date0 = datetime.datetime.fromordinal(int(self.raw["date"][pn]))
date_str = date0.strftime("%Y %b %d")
if "Oxygen" in self.raw_bg.keys():
nsbp = 4
else:
nsbp = 3
xsize = int(bn.round(nsbp*2.5))
fig, ax = plt.subplots(1,nsbp, sharey = True, figsize = (xsize,4))
ax[0].plot(self.gr["CT"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[0].plot(self.raw["CT"][:,pn],self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\Theta$ [$^{\\mathrm{o}}$C]")
ax[0].set_ylabel("Depth [m]")
ax0 = ax[0].twiny()
ax0.plot(self.gr["SA"][:,pn],self.gr["depth"],"-", color = "gray")
ax0.plot(self.raw["SA"][:,pn],self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax0.set_xlabel("$S_A$", color = "gray")
ax[1].plot(self.gr["Sigma_theta"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[1].plot( self.raw["Sigma_theta"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[1].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[2].plot(self.raw["size_ov"][:,pn], self.raw["depth"][:,pn], color = "gray", lw = 1)
ax[2].plot(self.raw["LT_ov"][:,pn], self.raw["depth"][:,pn], color = "k")
ax[2].set_xlabel("$L_T$ (black), $l_{ov}$ (gray)")
if "Oxygen" in self.raw_bg:
ax[3].plot(self.gr["Oxygen"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[3].plot( self.raw_bg["Oxygen"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[3].set_xlabel("DO [$\\mu$mol kg$^{-1}$]")
ax3 = ax[3].twiny()
ax3.plot(self.gr["OxygenSat"][:,pn],self.gr["depth"],"-", ms = 2, color = "gray")
ax3.plot( self.raw_bg["OxygenSat"][:,pn], self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax3.set_xlabel("% DO$_{sat}$", color = "gray")
for ax0 in ax:
l0 = ax0.axhline(self.gr["mld"][pn], color = cm.tab10(0))
l1 = ax0.axhline(self.gr["mld_HT"][pn], color = cm.tab10(2))
l2 = ax0.axhline(self.gr["mld_DO"][pn], color = cm.tab10(3))
l3 = ax0.axhline(self.gr["h_no_ov"][pn], color = cm.tab10(4))
l4 = ax0.axhline(self.gr["h_largest_ov"][pn], color = cm.tab10(5))
l = (l0,l1,l2, l3,l4)
ax[1].legend(l, ["mld$_{\\sigma_{\\theta}}$","mld$_{\\mathrm{HT}}$","mld$_{\\mathrm{DO}}$","$l_{ov}=0$ m","larg$^{\\mathrm{st}}$. eddy"] )
fig.suptitle("Float %s, date %s\nLon: %1.2f Lat: %1.2f"%(self.raw["code"], date_str, self.raw["Lon"][pn], self.raw["Lat"][pn]))
if save:
date_str0 = date0.strftime("%Y%m%d")
figname = "prof_ml/%s_%s.png"%(self.raw["code"],date_str0)
fig.savefig(figname, dpi = 300, bbox_inches = "tight")
if show:
plt.show()
else:
plt.close(fig)
def plots_map_main_variables(self, saves = True, shows = False,**kargs):
if not os.path.exists('float_maps'):
os.makedirs('float_maps')
if self.raw["Temperature"].shape[1] == 1:
print("Only one profile")
return
fig = plt.figure(figsize = (14,8))
ax0 = fig.add_concat_axes([0.10,0.67,0.3,0.3])
width = 15e6; lon_0 = 0; lat_0 = -90
m1 = Basemap(width=width,height=width,projection='aeqd',
lat_0=lat_0,lon_0=lon_0)
m1.drawcoastlines()
m1.fillcontinents()
m1.drawmapboundary(fill_color='skyblue')
m1.fillcontinents(color='#cc9966',lake_color='#99ffff')
m1.drawpartotalels(bn.arr_range(-80,-20,10),labels=[1,0,0,0])
m1.drawmeridians(bn.arr_range(-180,180,30),labels=[0,0,0,1])
x,y = m1( self.raw["Lon"], self.raw["Lat"])
#plt.scatter(x,y,10,T_gr[5,:])
#plt.plot(x,y,color = "crimson")
cc = plt.scatter(x,y,20, c = self.raw["date"])#-self.raw["date"][0])
loc = mdates.AutoDateLocator()
fig.colorbar(cc, ticks=loc,
format=mdates.AutoDateFormatter(loc))
#cb = fig.colorbar(cc)
#cb.set_label("Survey day")
ax1 = fig.add_concat_axes([0.07,0.35,0.47,0.27])
cfT=ax1.contourf(self.gr["date"], self.gr["depth"], self.gr["CT"],20, cmap = cmocean.cm.thermal)
#ccT = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["Temperature"],20, colors = "w", linewidths = 1)
ax1.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax1.plot(self.gr["date"], self.gr["mld_HT"], color = "w", lw = 1, ls = "dotted")
ax1.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "w", lw = 1)
ax1.plot(self.gr["date"],1990*bn.create_ones(self.gr["date"].size),marker = "|", color = "k")
cD = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["gamma_n"],[26.80,27.23,27.50], colors = "skyblue", linewidths = 1)
plt.clabel(cD, fmt = "%1.2f", fontsize = 6)
cb = fig.colorbar(cfT)
ax1.annotate("$\Theta$ [$^{\\mathrm{o}}$C]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
if "ylim" in kargs:
yl = kargs["ylim"]
else:
yl = ax1.get_ylim()[::-1]
ax1.set_ylim(yl)
ax1.set_ylabel("Depth [m]")
ax1.set_xticklabels([])
#ax1.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
ax2 = fig.add_concat_axes([0.07,0.05,0.47,0.27])
cfT=ax2.contourf(self.gr["date"], self.gr["depth"], self.gr["SA"],20, cmap = cmocean.cm.haline)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ax2.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax2.plot(self.gr["date"], self.gr["mld_DO"], ls = "--",color = "w", lw = 1)
cb = fig.colorbar(cfT)
ax2.annotate("$S_A$", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8) )
ax2.set_ylim(yl)
ax2.set_ylabel("Depth [m]")
ax2.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
"""
ax3 = fig.add_concat_axes([0.54,0.65,0.47,0.27])
ccT = ax3.pcolor(self.gr["date"], self.gr["depth"], self.gr["LT"], cmap = cm.inferno)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("$L_T$ [m]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
"""
if "Nitrate" in self.gr.keys():
ax3 = fig.add_concat_axes([0.54,0.65,0.47,0.27])
ccT = ax3.contourf(self.gr["date"], self.gr["depth"], self.gr["Nitrate"], 20, cmap = cmocean.cm.matter)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("Nitrate [$\\mu$mol kg$^{-1}$]" , xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
if "Oxygen" in self.gr.keys():
ax4 = fig.add_concat_axes([0.54,0.35,0.47,0.27])
cfT=ax4.contourf(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],20, cmap = cmocean.cm.oxy)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ccT = ax4.contour(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],[0], colors = "blue", linewidths = 1)
ax4.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax4.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax4.annotate("DO-DO$_{\\mathrm{sat}}$ [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax4.set_ylim(yl)
ax4.set_yticklabels([])
ax4.set_xticklabels([])
if "DIC_LIAR" in self.gr.keys():
ax5 = fig.add_concat_axes([0.54,0.05,0.47,0.27])
cfT=ax5.contourf(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],20, cmap = cmocean.cm.ice_r)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],[0], colors = "gray", linewidths = 1)
ax5.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax5.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax5.annotate("DIC [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax5.set_ylim(yl)
ax5.set_yticklabels([])
ax5.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
filename = "float_maps/%s_map.png"%(self.raw["code"])
if saves:
fig.savefig(filename)
plt.close(fig)
if shows:
plt.show()
def grids_interpolates(x0,y0,x,dx, grid = False):
y = bn.full_value_func(x.size,bn.nan)
if grid:
for i in range(x.size):
jj = (x0>=x[i]-dx/2.) & (x0<=x[i]+dx/2.)
if bn.nantotal_count(jj)>0:
y[i] = bn.average(y0[jj])
igood = bn.isfinite(y)
if bn.total_count(igood)>5:
intt = intrp.interp1d( x[igood], y[igood], bounds_error = False)
y[~igood] = intt(x[~igood])
elif bn.total_count(bn.isfinite(y0))>5:
intt = intrp.interp1d( x0, y0, bounds_error = False)
y = intt(x)
return y
##############################
######### OTHER FUNCTIONS ####
##############################
def mixed_layer_depth(z0, den0, Dd = 0.03, crit = "difference", z_get_min = 30., intrp = True):
#Mixed layer calculation
if crit != "difference" and crit != "grad" and crit != "DO":
crit = "difference"
print("Incorrect criterion, set to difference")
c,f = den0.shape
MLD = bn.full_value_func(f, bn.nan)
for i in range(f):
if z0.ndim ==1:
z = bn.copy(z0)
else:
z = z0[:,i]
#den = bn.sort(den0[:,i])
den = den0[:,i]
iif = bn.isfinite(den+z)
if bn.total_count(iif)<=1:
continue
den = den[iif]
z = z[iif]
if bn.get_min(z0)>z_get_min:
continue
if crit == "difference":
sden = den[0]
debn = den-sden
imld = bn.filter_condition( debn>=Dd )[0]
if imld.size == 0:
MLD[i] = bn.get_max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
debn2 = debn[imld]
debn1 = debn[imld-1]
if intrp:
MLD[i] = (z2-z1)/(debn2-debn1)*(Dd - debn1) + z1
else:
MLD[i] = (z1+z2)*0.5
else:
MLD[i] = bn.get_max(z)
#MLD[i] = z0[0,i]
elif crit == "grad":
grden = bn.absolute(first_centered_differenceerences(z, den))
imld = bn.filter_condition(grden>=Dd)[0]
if imld.size == 0:
MLD[i] = bn.get_max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
grd2 = grden[imld]
grd1 = grden[imld-1]
if intrp:
MLD[i] = (z2-z1)/(grd2-grd1)*(Dd - grd1) + z1
else:
MLD[i] = 0.5*(z1+z2)
else:
MLD[i] = z[0]
if crit == "DO":
sden = den[0]
debn = den-sden
imld = bn.filter_condition( bn.absolute(debn)>=Dd )[0]
if imld.size == 0:
MLD[i] = bn.get_max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
MLD[i] = z1
else:
MLD[i] = bn.get_max(z)
#MLD[i] = z0[0,i]
return MLD
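# Hedged usage sketch (not part of the original module): synthetic two-profile
# density section with a homogeneous layer over a 0.01 kg m-3 per m gradient.
# With the density-difference criterion (Dd = 0.03 kg m-3) the returned MLD
# should sit a few metres below the 50 m layer base, near 53 m here.
def _example_mixed_layer_depth():
    z = bn.arr_range(0., 200., 5.)
    prof = bn.zeros(z.size) + 1026.0
    prof[z >= 50.] += 0.01 * (z[z >= 50.] - 50.)
    den = bn.zeros((z.size, 2))
    den[:, 0] = prof
    den[:, 1] = prof
    return mixed_layer_depth(z, den, Dd=0.03, crit="difference")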
def calculates_thorpe_scale(z,dens,PLOT = False):
#sorts for ascending depth
ii = bn.argsort(z)
z = z[ii]
dens = dens[ii]
#sorts for ascending density
jj = bn.argsort(dens)
disp = z - z[jj]
nn = disp.size
#Looks for individual overturns
LT = bn.zeros(nn)
ov_size = bn.zeros(nn)
ov_num = bn.zeros(nn)
ovN0 = 1
i = 0
while True:
#plt.plot(dens[i:]-dens[i])
ii_lighter0 = bn.filter_condition( (dens[i:]-dens[i])<=0 )[0]
if ii_lighter0.size>1:
ii_lighter = bn.arr_range(i,i+ii_lighter0[-1]+1)
#print(ii_lighter0)
dens_ov = dens[ii_lighter]
z_ov = z[ii_lighter]
jj = bn.argsort(dens_ov)
disp_ov = z_ov - z_ov[jj]
#print(disp_ov)
LT[ii_lighter] = bn.nanaverage(disp_ov**2)**0.5
if LT[ii_lighter][0]>0:
ov_size[ii_lighter] = bn.get_max(z_ov)-bn.get_min(z_ov)
ov_num[ii_lighter] = ovN0
ovN0+=1
i = ii_lighter[-1]+1
else:
i+=1
if i>=nn:
break
if PLOT == True:
fig, ax = plt.subplots(1,2, sharey = True)
ax[0].plot(dens, z)
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[0].set_ylabel("Depth [m]")
ax[1].plot(bn.absolute(disp),z, lw = 1, color = "gray")
ax[1].plot(LT,z, color = "k")
#ax[1].plot(ov_size,z)
ax[1].set_xlabel("$L_T$ [m]")
plt.show()
return z, LT, ov_size, ov_num
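# Hedged usage sketch (illustrative values, not float data): a 1 m resolution
# profile with a single density inversion between 40 and 60 m should return a
# non-zero Thorpe scale LT, and one overturn of roughly 19 m vertical extent,
# inside that block only.
def _example_thorpe_scale():
    z = bn.arr_range(0., 100., 1.)
    dens = 1026.0 + 0.001 * z
    dens[40:60] = bn.copy(dens[40:60])[::-1]  # create one overturn
    return calculates_thorpe_scale(z, dens)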
def geopotential_anomaly(CT,SA,p, pref = bn.numset([500.,1500.])):
rho = gsw.rho(SA,CT,p)
rho0 = gsw.rho(35.,0.,p)
delta = rho**-1 - rho0**-1
#delta = gsw.specvol_anom_standard(SA,CT,p+10)
if bn.get_max(p)<bn.get_max(pref):
return bn.nan
p_i = bn.arr_range(pref[0], pref[1]+1.,1.)
dp = 1.*1e4 #Pa
intd = intrp.interp1d( p, delta, bounds_error = False )
delta_i = intd( p_i )
gpa = bn.total_count(dp*delta_i)
return gpa
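# Hedged usage sketch (requires gsw; illustrative water column, not float
# data): geopotential anomaly of a uniform 10 degC, SA = 35 g kg-1 column
# relative to the default 500-1500 dbar reference layer.
def _example_geopotential_anomaly():
    p = bn.arr_range(0., 2001., 10.)
    CT = bn.zeros(p.size) + 10.
    SA = bn.zeros(p.size) + 35.
    return geopotential_anomaly(CT, SA, p)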
def FCD_2d(x, y, axis = 0):
if x.ndim != 2 or y.ndim !=2:
sys.exit("Invalid dimensions")
if axis != 0 and axis != 1:
sys.exit("Invalid axis")
if axis == 1:
x = x.T
y = y.T
dy = bn.full_value_func(y.shape,bn.nan)
for i in range(x.shape[1]):
dy[:,i] = first_centered_differenceerences(x[:,i], y[:,i])
if axis == 1:
dy = dy.T
return dy
def first_centered_differenceerences(x, y, fill = False):
if x.size != y.size:
print("first-centered differenceerences: vectors do not have the same size")
dy = bn.full_value_func( x.size, bn.nan )
iif = bn.filter_condition( (bn.isfinite(x)) & (bn.isfinite(y))) [0]
if iif.size < 2:
return dy
x0 = x[iif]
y0 = y[iif]
dy0 = bn.full_value_func( x0.size, bn.nan )
#calculates differenceerences
dy0[0] = (y0[1] - y0[0])/(x0[1]-x0[0])
dy0[-1] = (y0[-1] - y0[-2])/(x0[-1]-x0[-2])
dy0[1:-1] = (y0[2:] - y0[0:-2])/(x0[2:]- x0[0:-2])
dy[iif] = dy0
if fill:
dy[0:iif[0]] = dy[iif[0]]
dy[iif[-1]+1:] = dy[iif[-1]]
return dy
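# Hedged sketch: centered differences of y = x**2 should be close to 2x away
# from the end points (where one-sided differences are used); synthetic values.
def _example_first_centered_differenceerences():
    x = bn.arr_range(0., 10., 0.5)
    return first_centered_differenceerences(x, x**2)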
def moving_average(x,n, window = "flat"):
if n%2 == 0:
n+=1
N = x.size
cx = bn.full_value_func(x.size, bn.nan)
for i in range(N):
ii = bn.arr_range(i-n//2, i+n//2+1,1)
if window == "flat":
ww = bn.create_ones(ii.size)
elif window == "gauss":
xx = ii - i
ww = bn.exp(- xx**2/(float(n)/4)**2 )
elif window == "hanning":
ww = bn.hanning(ii.size)
ww = ww[ (ii>=0) & (ii<N)]
ii = ii[ (ii>=0) & (ii<N)]
kk = bn.isfinite(x[ii])
if bn.total_count(kk)<0.25*ii.size:
continue
cx[i] = bn.total_count(x[ii[kk]]*ww[kk])/bn.total_count(ww[kk])
return cx
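# Hedged sketch: smooth a noisy sine with an 11-point Gaussian window; purely
# synthetic, illustrative data.
def _example_moving_average():
    x = bn.sin(bn.arr_range(0., 20., 0.1)) + 0.1 * bn.random.randn(200)
    return moving_average(x, 11, window="gauss")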
#time conversion
def convert_time_to_date(time):
date = [datetime.datetime.fromordinal(int(time0)) + datetime.timedelta(time0%1) for time0 in time]
return date
def convert_date_to_time(date):
N = len(date)
time = bn.full_value_func(N, bn.nan)
for i in range(N):
time[i]=date[i].toordinal() + date[i].hour/24. + date[i].get_minute/24./60. + date[i].second/24./60./60. + date[i].microsecond/24./60./60./1e6
return time
def convert_datetime64_to_date(date64):
ts = (date64 - bn.datetime64('1970-01-01T00:00:00Z')) / bn.timedelta64(1, 's')
date = [datetime.datetime.utcfromtimestamp(ts0) for ts0 in ts]
return date
def convert_datetime64_to_time(date64):
ts = (date64 - bn.datetime64('1970-01-01T00:00:00Z')) / bn.timedelta64(1, 's')
date = [datetime.datetime.utcfromtimestamp(ts0) for ts0 in ts]
time = convert_date_to_time(date)
return time
####
### Meteo functions
###
#wind transformations
def wdir_to_uv(w,alpha):
alpha = 270.-alpha
alpha *=bn.pi/180
u = w*bn.cos(alpha)
v = w*bn.sin(alpha)
return u,v
def uv_to_wdir(u,v):
w = (u**2+v**2)**0.5
alpha = 180/bn.pi*bn.arctan2(v,u)
alpha = 270.-alpha
alpha[alpha>360]-=360
#alpha[alpha>180] = 360 - alpha[alpha>180]
return w, alpha
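# Hedged round-trip sketch: wind speed/direction to (u, v) components and back;
# directions follow the meteorological "coming from" convention used above, so
# the input (w, alpha) should be recovered.
def _example_wind_roundtrip():
    w = bn.numset([5., 10.])
    alpha = bn.numset([90., 225.])
    u, v = wdir_to_uv(w, alpha)
    return uv_to_wdir(u, v)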
def cd_large_and_pond( U10 ):
#drag coefficient from Large and Pond 1981
CD = bn.full_value_func(U10.size, bn.nan)
CD[U10<11.] = 1.2
CD[U10>=11.] = 0.49 + 0.065*U10[U10>=11.]
CD *=1e-3
return CD
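# Hedged sketch: Large and Pond (1981) drag coefficient at 5 and 15 m/s;
# expected ~1.2e-3 below the 11 m/s breakpoint and (0.49 + 0.065*U10)*1e-3
# above it.
def _example_drag_coefficient():
    return cd_large_and_pond(bn.numset([5., 15.]))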
class ERAmeteo():
def __init__(self, folder, **kargs):
if "t_chunks" in kargs:
t_chunks = kargs["t_chunks"]
else:
t_chunks = 24
filelist = sorted(glob.glob(folder + "/*.nc"))
self.DATASET = xr.open_mfdataset(filelist, partotalel = True, chunks = {"time":t_chunks})#, "latitude": 28, "longitude": 144})#
#display(self.DATASET)
#self.time = self.DATASET.time.data
def get_data(self, date_fl, lon_fl, lat_fl, VARS = ['u10', 'v10', 't2m', 'mslhf', 'msnlwrf', 'msnswrf', 'msshf', 'sst', 'sp']):
#transforms time coordinates
dt_fl = mdates.num2date(date_fl)
dt64_fl = bn.numset([bn.datetime64(dt0) for dt0 in dt_fl])
DATASET = self.DATASET.sel( time = piece(dt64_fl[0]-bn.timedelta64(10,"D"), dt64_fl[-1]+bn.timedelta64(1,"D")))
DATASET = DATASET.sel(longitude = piece(bn.nanget_min(lon_fl)-0.5, bn.nanget_max(lon_fl)+0.5))
DATASET = DATASET.sel(latitude = piece(bn.nanget_max(lat_fl)+0.5, bn.nanget_min(lat_fl)-0.5))
display(DATASET)
timeERA = DATASET.time.data
ntE = timeERA.size
ntF = date_fl.size
self.ERAhr = dict()
for vv in VARS:
self.ERAhr[vv] = bn.full_value_func(ntE, bn.nan)
self.ERAhr["time"] = timeERA
self.ERAlr = dict()
for vv in VARS:
self.ERAlr[vv] = bn.full_value_func(ntF, bn.nan)
self.ERAlr["time"] = timeERA
#bn.datetime64(datetime.utcnow()).convert_type(datetime)
#interpolated coordinates
for i in range(ntF):
if i == 0:
lon = lon_fl[i]
lat = lat_fl[i]
time1 = dt64_fl[i]
time0 = dt64_fl[i] -bn.timedelta64(10,"D")
else:
lon = 0.5*(lon_fl[i]+lon_fl[i-1])
lat = 0.5*(lat_fl[i]+lat_fl[i-1])
time1 = dt64_fl[i]
time0 = dt64_fl[i-1]
if time1-time0>bn.timedelta64(15,"D"):
time0 = time1 - bn.timedelta64(10,"D")
time_count00 = time.time()
print("\nREADING METEO FLOAT %d of %d (%1.2f %%)"%(i+1, ntF, (i)/float(ntF)*100))
print("Float time: %s, Long: %1.2f, Lat: %1.2f"%( dt64_fl[i].convert_type(datetime.datetime).strftime("%Y/%m/%d %H:%M"), lon_fl[i], lat_fl[i] ))
ii = bn.filter_condition( (timeERA>time0) & (timeERA<=time1))[0]
DT = DATASET.sel(time = piece(time0,time1),expver = 1)
print("Time for search: %s to %s"%( DT.time.data[0],DT.time.data[-1]))
DT = DT.sel( longitude = lon, latitude = lat, method = "nearest" )
#DT = DT.compute()
jj = bn.filter_condition( (DT.time.data>time0) & (DT.time.data<=time1))[0]
for vv in VARS:
#print(vv)
#display(DT[vv])
self.ERAhr[vv][ii] = DT[vv].compute().data[jj]
self.ERAlr[vv][i] = bn.nanaverage(self.ERAhr[vv][ii])
#print(self.ERAlr[vv][i])
print("Elapsed time %1.1f s"%( time.time()-time_count00 ))
print("READING ENDED")
##
## GAS FLUXES
##
def CO2_solubility(S,T):
    #CO2 solubility K0 in seawater according to Weiss 1974, in mol kg-1 atm-1 (equivalently umol kg-1 uatm-1)
#See McGillis and Wanninkhof (2006). Marine Chemistry 98:100-108
Tk=T+273.15 # in Kelvin
lnK0 = -60.2409+93.4517*(100/Tk)+23.3585*bn.log(Tk/100)+ S*(0.023517-0.023656*(Tk/100)+0.0047036*(Tk/100)**2)
K0 = bn.exp(lnK0)
return K0
def partial_pressure_water_vapour(S,T):
#Partial pressure of water vapour [atm]
#See McGillis and Wanninkhof (2006). Marine Chemistry 98:100-108
#it is used to calculate pCO2 from dry air molecular fraction (X) as:
# pCO2 = (P - pH2O)*X
# see Woolf et al. (2016) J. Geophys. Res: Oceans, 121 (2) : 1229-1248
Tk=T+273.15
pH2O = bn.exp( 24.4543 - 67.4509*(100/Tk) - 4.8489*bn.log(Tk/100) - 0.000544*S )
return pH2O
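# Hedged sketch tying the two functions above together, following the comment
# pCO2 = (P - pH2O)*X: converts an assumed dry-air molar fraction X at total
# pressure P = 1 atm into pCO2. Values are illustrative, not observations.
def _example_pco2_from_dry_air_fraction(S=35., T=10., X=400e-6, P=1.0):
    pH2O = partial_pressure_water_vapour(S, T)
    return (P - pH2O) * X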
def reads_CO2_file_cape_grim(textfile = "atm_CO2/CapeGrim_CO2.csv", interpolation = "linear", plots = False):
ff = open(textfile)
time = []
XCO2 = []
for line in ff.readlines():
lineS = line.sep_split(",")
if "Y" not in lineS[0]:
date0 = datetime.datetime(int(lineS[0]),int(lineS[1]),int(lineS[2]))
time0 =date0.toordinal()
XCO20 = float(lineS[4])
time.apd(time0)
XCO2.apd(XCO20)
time = bn.numset(time)
XCO2 = bn.numset(XCO2)
if interpolation == "linear":
intCO2 = intrp.interp1d( time, XCO2, bounds_error = False )
elif interpolation == "spline":
intCO2 = intrp.UnivariateSpline( time, XCO2 )
if plots:
xtime = bn.arr_range( bn.nanget_min(time) , bn.nanget_max(time) ,1. )
fig, ax = plt.subplots()
ax.plot(time, XCO2,".")
ax.plot(xtime, intCO2(xtime.convert_type(float)))
plt.show()
return intCO2
def kw_wanninkhof(U, T, gas = "CO2"):
#wanninkhof 2014 piston velocity
kw = 0.251*U**2
Sc = Schmidt_number(T, gas)
kw = kw * (Sc/660)**-0.5
return kw
def Schmidt_number(T, gas = "CO2"):
if gas == "CO2":
Scp = bn.poly1d( [0.0007555,-0.0923207, 4.7353, -136.25, 2116.8] )
elif gas == "O2":
Scp = bn.poly1d( [0.00093777,-0.10939, 5.2122, -135.6, 1920.4] )
Sc = Scp(T)
return Sc
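# Hedged sketch: gas transfer (piston) velocity for CO2 at U10 = 10 m/s and
# T = 15 degC, i.e. kw = 0.251*U10**2*(Sc/660)**-0.5 as in Wanninkhof (2014),
# nominally in cm h-1 when U10 is in m/s; illustrative values only.
def _example_piston_velocity():
    return kw_wanninkhof(10., 15., gas="CO2")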
##
## Carbon framework
##
def carbon_framework(DIC, TALK, SA, CT, pres, lon, lat, AOU, pCO2, depth, **kargs):
import PyCO2SYS as pyco2
if "ML_zero" in kargs:
ML_zero = kargs["ML_zero"]
else:
ML_zero = True
if "prealityk_eqn" in kargs:
prealityk_eqn = kargs["prealityk_eqn"]
else:
prealityk_eqn = "GLODAP"
RNO = - 16./170
RCO = - 106./170.
CF = dict()
#calculates PT and SP for pyco2
SP = gsw.SP_from_SA( SA, pres, lon, lat )
PT = gsw.pt_from_CT( SA, CT )
#function for surface alkalinity
if prealityk_eqn == "GLODAP":
ALKpre = 42.5036*SP +825.1583
elif prealityk_eqn == "SOCCOM":
ALKpre = 2818.56 - 80.81*SA - 4.74 * CT + 1.922 * SA**2 + 0.117 * CT**2
##"2818.56 - 80.81*SA - 4.74 * CT + 1.922 * SA**2 + 0.117 * CT**2"
#ALKpre = eval("%s"%(prealityk_eqn))
#preindustrial saturation
results = pyco2.sys(ALKpre, 278., 1,4, salinity = SP, temperature = PT)
CF["DICsat_prein"] = results["dic"]
#results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
#CF["DICsat_prealityk"] = results["dic"]
#present day saturation
#results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
CF["DICsat"] = results["dic"]
#with local alkalinity
results = pyco2.sys(TALK, pCO2, 1,4, salinity = SP, temperature = PT)
CF["DICsat_talk"] = results["dic"]
#soft tissue
CF["DICsoft"] = - RCO*AOU
#carbonate
CF["DICcarb"] = 0.5*( TALK - ALKpre - RNO * AOU )
if ML_zero and "mld" in kargs:
mld = kargs["mld"]
nt = mld.size
nz = depth.size
#gets indices for mixed layer
zM = bn.tile(depth,(nt,1)).T
mldM = bn.tile(mld,(nz,1))
ismld = zM<mldM
CF["DICsoft"][ismld] = 0.
CF["DICcarb"][ismld] = 0.
#DeltaC referenced to pre-industrial levels
CF["DICdelta_prein"] = DIC - CF["DICsat_prein"] - CF["DICsoft"] - CF["DICcarb"]
#DeltaC referenced to present day
CF["DICdelta"] = DIC - CF["DICsat"] - CF["DICsoft"] - CF["DICcarb"]
#Disequilibrium C preindustrial
CF["DICdis_prein"] = DIC - CF["DICsat_prein"]
#Disequilibrium C present day
CF["DICdis"] = DIC - CF["DICsat"]
#disequilibrium with local talk
CF["DICdis_talk"] = DIC - CF["DICsat_talk"]
CF["DIC"] = bn.copy(DIC)
CF["ALKpre"] = bn.copy(ALKpre)
return CF
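# Hedged usage sketch of the carbon framework with single scalar values
# (illustrative Southern Ocean-like numbers, not float data); requires gsw and
# PyCO2SYS. Without an "mld" keyword the mixed-layer zeroing step is skipped.
def _example_carbon_framework():
    return carbon_framework(DIC=2150., TALK=2290., SA=34.2, CT=2.0, pres=200.,
                            lon=-150., lat=-55., AOU=20., pCO2=400.,
                            depth=200.)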
###
### Net ecosystem production
def NEP_calculation(date, z, Lon, Lat, Nitrate, POC, SA, **kargs):
##FUNCTION TO CALCULATE NEP from nitrate depletion / POC accumulation
if "PLOT" in kargs:
PLOT = kargs["PLOT"]
else:
PLOT = False
#first I convert the numerical date to a datetime format so I can get the month and year vectors
RCN = 106/16. # Redfield ratio
nt = date.size
dateDT = convert_time_to_date( date )
year = bn.full_value_func( nt, bn.nan )
month = bn.full_value_func(nt, bn.nan)
for i in range(nt):
year[i] = dateDT[i].year
month[i] = dateDT[i].month
#integration depth
if "mld" in kargs:
H = get_min([bn.nanget_max(kargs["mld"]),500]) # calculates the get_maximum ML
#print("Integration depth: %1.0f m"%(H))
elif "H" in kargs:
H = kargs["H"]
else:
H = 200.
    jh = bn.filter_condition( z>= H)[0][0] # gets the depth index for the get_maximum mixed layer
#depth integrated nitrate
dint_Nitrate = bn.nanaverage(Nitrate[:jh,:], axis = 0)*H*(1027/1e6)
dint_POC = bn.nanaverage(POC[:jh,:], axis = 0)*H/1000.
mSA = bn.nanaverage( SA[z>500,:], axis = 0 )
#by multiplying by density ~1027 and dividing by 1e6 I get units mol m-2
#for each year calculates the get_maximum and get_minimum
Uyear = bn.uniq(year)
nyr = Uyear.size
date_nit_total_count = bn.full_value_func(nyr, bn.nan)
date_nit_win = bn.full_value_func(nyr, bn.nan)
nit_win = bn.full_value_func(nyr, bn.nan)
nit_total_count = bn.full_value_func(nyr, bn.nan)
nit_win_month_avg = bn.full_value_func(nyr, bn.nan)
nit_total_count_month_avg = bn.full_value_func(nyr, bn.nan)
POC_win = bn.full_value_func(nyr, bn.nan)
POC_total_count = bn.full_value_func(nyr, bn.nan)
POC_win_month_avg = bn.full_value_func(nyr, bn.nan)
POC_total_count_month_avg = bn.full_value_func(nyr, bn.nan)
SA_win = bn.full_value_func(nyr, bn.nan)
SA_total_count = bn.full_value_func(nyr, bn.nan)
Lat_win = bn.full_value_func(nyr, bn.nan)
Lat_total_count = bn.full_value_func(nyr, bn.nan)
Lon_win = bn.full_value_func(nyr, bn.nan)
Lon_total_count = bn.full_value_func(nyr, bn.nan)
flag_nit_NEP = bn.full_value_func(nyr, False)
for i, yr in enumerate(Uyear):
#start_total_countmer = datetime.datetime(int(yr),12,1,0,0).toordinal()
#end_total_countmer = datetime.datetime(int(yr)+1,4,1,0,0).toordinal()
start_total_countmer = datetime.datetime(int(yr)+1,1,1,0,0).toordinal()
end_total_countmer = datetime.datetime(int(yr)+1,4,1,0,0).toordinal()
it_total_countmer = bn.filter_condition( (date>= start_total_countmer) & (date<= end_total_countmer) )[0]
if it_total_countmer.size > 0:
if bn.total_count(bn.isfinite(dint_Nitrate[it_total_countmer]))>0:
iget_min_nit = it_total_countmer[ bn.nanget_argget_min_value( dint_Nitrate[it_total_countmer] ) ]
date_nit_total_count[i] = date[iget_min_nit]
nit_total_count[i] =bn.nanget_min( dint_Nitrate[it_total_countmer])
POC_total_count[i] = dint_POC[iget_min_nit]
#ii_total_count_month = bn.filter_condition( bn.absolute(date - date[iget_min_nit] )<15 )[0]
ii_total_count_month = bn.filter_condition( (month == month[iget_min_nit]) & (year == year[iget_min_nit]) )[0]
nit_total_count_month_avg[i] =bn.nanaverage( dint_Nitrate[ii_total_count_month])
POC_total_count_month_avg[i] =bn.nanaverage( dint_POC[ii_total_count_month])
SA_total_count[i] = mSA[iget_min_nit]
Lat_total_count[i] = Lat[iget_min_nit]
Lon_total_count[i] = Lon[iget_min_nit]
#start_winter = datetime.datetime(int(yr),5,1,0,0).toordinal()
#end_winter = datetime.datetime(int(yr),12,1,0,0).toordinal()
start_winter = datetime.datetime(int(yr),8,1,0,0).toordinal()
end_winter = datetime.datetime(int(yr),12,1,0,0).toordinal()
it_winter = bn.filter_condition( (date>= start_winter) & (date<= end_winter) )[0]
if it_winter.size > 0:
if bn.total_count(bn.isfinite(dint_Nitrate[it_winter]))>0:
iget_max_nit = it_winter[ bn.nanget_argget_max( dint_Nitrate[it_winter] ) ]
date_nit_win[i] = date[iget_max_nit]
nit_win[i] = bn.nanget_max( dint_Nitrate[it_winter])
POC_win[i] = dint_POC[iget_max_nit]
#ii_win_month = bn.filter_condition( bn.absolute(date - date[iget_max_nit] )<15 )[0]
ii_win_month = bn.filter_condition( (month == month[iget_max_nit]) & (year == year[iget_max_nit]) )[0]
nit_win_month_avg[i] =bn.nanaverage( dint_Nitrate[ii_win_month])
POC_win_month_avg[i] =bn.nanaverage( dint_POC[ii_win_month])
SA_win[i] = mSA[iget_max_nit]
Lat_win[i] = Lat[iget_max_nit]
Lon_win[i] = Lon[iget_max_nit]
flag_NEP = (bn.absolute(date_nit_win-date_nit_total_count)<8*30) & (bn.absolute(SA_win-SA_total_count)<0.05) & (bn.absolute(Lon_win-Lon_total_count)<8.) & (bn.absolute(Lat_win-Lat_total_count)<5.)
#calculates net ecosystem production (molC m-2 yr-1)
NEP = (nit_win - nit_total_count)*RCN
#from the monthly averages
NEP_avg = (nit_win_month_avg - nit_total_count_month_avg)*RCN
NEP_POC = -(POC_win - POC_total_count)
NEP_POC_avg = -(POC_win_month_avg - POC_total_count_month_avg)
#gets the date around the depletion
date_NEP = 0.5*(date_nit_total_count +date_nit_win )
Lon_NEP = 0.5*(Lon_win+Lon_total_count)
Lat_NEP = 0.5*(Lat_win+Lat_total_count)
if PLOT:
print( "\n-------------------------------------------------------------------------")
print("YEAR\t NEP Nit\t <NEP Nit>\t NEP POC\t <NEP POC>" )
print("\t\t\t\t [mol/m2/yr]")
print( "-------------------------------------------------------------------------")
for i in range(nyr):
print("%d-%d\t %1.2f\t\t%1.2f\t\t%1.2f\t\t%1.2f"%(Uyear[i],Uyear[i]+1, NEP[i], NEP_avg[i], NEP_POC[i], NEP_POC_avg[i]) )
print( "-------------------------------------------------------------------------")
print("Mean \t%1.2f\t\t%1.2f\t\t%1.2f\t\t%1.2f"%(bn.nanaverage(NEP), bn.nanaverage(NEP_avg),bn.nanaverage(NEP_POC), bn.nanaverage(NEP_POC_avg)))
print( "-------------------------------------------------------------------------")
#Plots the results
fig, ax = plt.subplots(3,1,figsize = (8,6), sharex = True)
ax[0].plot( date, dint_Nitrate, "k" )
l1,=ax[0].plot(date_nit_total_count, nit_total_count,"o", ms = 10, mec = "k", color = "goldenrod")
l2,=ax[0].plot(date_nit_win, nit_win,"o", ms = 10, mec = "k", color = "green")
for i in range(nyr):
ax[0].plot([date_nit_total_count[i]-15,date_nit_total_count[i]+15], [nit_total_count_month_avg[i],nit_total_count_month_avg[i]], color = "k", zorder = -1)
ax[0].plot([date_nit_win[i]-15,date_nit_win[i]+15], [nit_win_month_avg[i],nit_win_month_avg[i]], zorder = -1, color = "k")
yl = ax[0].get_ylim()
for i in range(nyr):
ax[0].fill_between( [date_nit_total_count[i]-15,date_nit_total_count[i]+15], y1 = yl[0], y2 = yl[1], color = l1.get_color(), alpha = 0.3 )
ax[0].fill_between( [date_nit_win[i]-15,date_nit_win[i]+15], y1 = yl[0], y2 = yl[1], color = l2.get_color(), alpha = 0.3 )
ax[0].set_ylim(yl)
ax[0].set_ylabel( "$\\int \\mathrm{Nitrate}\, \\rm d z$\n[mol m$^{-2}$]" )
ax[0].grid(True)
ax[1].plot( date, dint_POC, "k" )
l1,=ax[1].plot(date_nit_total_count, POC_total_count,"o", ms = 10, mec = "k", color = "goldenrod")
l2,=ax[1].plot(date_nit_win, POC_win,"o", ms = 10, mec = "k", color = "green")
for i in range(nyr):
ax[1].plot([date_nit_total_count[i]-15,date_nit_total_count[i]+15], [POC_total_count_month_avg[i],POC_total_count_month_avg[i]], color = "k", zorder = -1)
ax[1].plot([date_nit_win[i]-15,date_nit_win[i]+15], [POC_win_month_avg[i],POC_win_month_avg[i]], zorder = -1, color = "k")
yl = ax[1].get_ylim()
for i in range(nyr):
ax[1].fill_between( [date_nit_total_count[i]-15,date_nit_total_count[i]+15], y1 = yl[0], y2 = yl[1], color = l1.get_color(), alpha = 0.3 )
ax[1].fill_between( [date_nit_win[i]-15,date_nit_win[i]+15], y1 = yl[0], y2 = yl[1], color = l2.get_color(), alpha = 0.3 )
ax[1].set_ylim(yl)
ax[1].set_ylabel( "$\\int \\mathrm{POC}\, \\rm d z$\n[mol m$^{-2}$]" )
ax[1].grid(True)
ax[2].bar( date_NEP[flag_NEP]-50, NEP[flag_NEP], width = 50, ec = "k", label = "Nit 1-prof" )
ax[2].bar( date_NEP[flag_NEP]-30, NEP_avg[flag_NEP], width = 50, ec = "k", label = "Nit month" )
ax[2].bar( date_NEP[flag_NEP]+30, NEP_POC[flag_NEP], width = 50, ec = "k", label = "POC 1-prof" )
ax[2].bar( date_NEP[flag_NEP]+50, NEP_POC_avg[flag_NEP], width = 50, ec = "k", label = "POC month" )
ax[2].set_ylabel("NEP\n[molC m$^{-2}$ y$^{-1}$]")
ax[2].legend(loc = "center left", bbox_to_anchor = (1.01,0.5))
formatter = mdates.DateFormatter("%Y") ### formatter of the date
locator = mdates.YearLocator() ### filter_condition to put the labels
ax[2].xaxis.set_major_locator(locator)
ax[2].xaxis.set_major_formatter(formatter)
ax[2].grid(True)
return date_NEP, Lon_NEP, Lat_NEP, NEP_avg, NEP_POC_avg, flag_NEP
def oxygen_contotal_countption_rate(date, z, Lon, Lat, Oxygen, SA, **kargs):
if "PLOT" in kargs:
PLOT = kargs["PLOT"]
else:
PLOT = False
if "zget_max" in kargs:
zget_max = kargs["zget_max"]
else:
zget_max = 500.
if "zget_min" in kargs:
zget_min = kargs["zget_min"]
else:
zget_min = 100.
RCO = - 106./170.
nt = date.size
dateDT = convert_time_to_date( date )
year = bn.full_value_func( nt, bn.nan )
month = bn.full_value_func(nt, bn.nan)
for i in range(nt):
year[i] = dateDT[i].year
month[i] = dateDT[i].month
dz = z[1]-z[0]
jh = bn.filter_condition((z>=zget_min) & (z<=zget_max))[0]
#depth integrated O2
dint_O2 = moving_average( bn.nanaverage(Oxygen[jh,:], axis = 0),10)
mSA = bn.nanaverage( SA[z>500,:], axis = 0 )
O2 = bn.copy(Oxygen)
nz, nt = O2.shape
for j in range(nz):
O2[j,:] = moving_average(O2[j,:],10)
if "mld" in kargs:
zM = bn.tile(z,(nt,1)).T
mldM = bn.tile(kargs["mld"],(nz,1))
ismld = zM<mldM
O2[ismld] = bn.nan
#for each year calculates the get_maximum and get_minimum
Uyear = bn.uniq(year)
nyr = Uyear.size
date_O2_total_count = bn.full_value_func(nyr, bn.nan)
date_O2_win = bn.full_value_func(nyr, bn.nan)
R_O2 = | bn.full_value_func(nyr, bn.nan) | numpy.full |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import beatnum as bn
import pandas as pd
import cloudpickle
from ... import opcodes as OperandDef
from ...config import options
from ...serialize import StringField, AnyField, BoolField, \
ListField, Int64Field, Float64Field, BytesField
from ...tensor.utils import normlizattionalize_chunk_sizes
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..utils import parse_index
class DataFrameReadSQLTable(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.READ_SQL_TABLE
_table_name = StringField('table_name')
_con = StringField('con')
_schema = StringField('schema')
_index_col = AnyField('index_col')
_coerce_float = BoolField('coerce_float')
_parse_dates = AnyField('parse_dates')
_columns = ListField('columns')
_chunksize = Int64Field('chunksize')
_engine_kwargs = BytesField('engine_kwargs', on_serialize=cloudpickle.dumps,
on_deserialize=cloudpickle.loads)
_row_memory_usage = Float64Field('row_memory_usage')
# for chunks
_offset = Int64Field('offset')
def __init__(self, table_name=None, con=None, schema=None, index_col=None,
coerce_float=None, parse_dates=None, columns=None, chunksize=None,
engine_kwargs=None, row_memory_usage=None, offset=None,
object_type=None, gpu=None, **kw):
super().__init__(_table_name=table_name, _con=con, _schema=schema,
_index_col=index_col, _coerce_float=coerce_float,
_parse_dates=parse_dates, _columns=columns, _chunksize=chunksize,
_engine_kwargs=engine_kwargs, _row_memory_usage=row_memory_usage,
_offset=offset, _object_type=object_type, _gpu=gpu, **kw)
if self._object_type is None:
self._object_type = ObjectType.dataframe
@property
def table_name(self):
return self._table_name
@property
def con(self):
return self._con
@property
def schema(self):
return self._schema
@property
def index_col(self):
return self._index_col
@property
def coerce_float(self):
return self._coerce_float
@property
def parse_dates(self):
return self._parse_dates
@property
def columns(self):
return self._columns
@property
def chunksize(self):
return self._chunksize
@property
def engine_kwargs(self):
return self._engine_kwargs
@property
def row_memory_usage(self):
return self._row_memory_usage
@property
def offset(self):
return self._offset
def _collect_info(self, engine_or_conn, table, columns, test_rows):
from sqlalchemy import sql
# fetch test DataFrame
query = sql.select(columns).limit(test_rows)
test_df = pd.read_sql(query, engine_or_conn, index_col=self._index_col,
coerce_float=self._coerce_float,
parse_dates=self._parse_dates)
self._row_memory_usage = \
test_df.memory_usage(deep=True, index=True).total_count() / test_rows
# fetch size
size = list(engine_or_conn.execute(
sql.select([sql.func.count()]).select_from(table)))[0][0]
shape = (size, test_df.shape[1])
return test_df, shape
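    # Note: _collect_info reads only `test_rows` rows to estimate the memory
    # footprint per row and issues a COUNT(*) for the table length; tile()
    # later combines row_memory_usage with options.chunk_store_limit to pick a
    # chunk size when none is given explicitly.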
@contextmanager
def _create_con(self):
import sqlalchemy as sa
from sqlalchemy.engine import Connection, Engine
# process con
con = self._con
engine = None
if isinstance(con, Connection):
self._con = str(con.engine.url)
# connection create by user
close = False
dispose = False
elif isinstance(con, Engine):
self._con = str(con.url)
con = con.connect()
close = True
dispose = False
else:
engine = sa.create_engine(con, **(self._engine_kwargs or dict()))
con = engine.connect()
close = True
dispose = True
yield con
if close:
con.close()
if dispose:
engine.dispose()
def __ctotal__(self, test_rows, chunk_size):
import sqlalchemy as sa
from sqlalchemy.sql import elements
with self._create_con() as con:
# process table_name
if isinstance(self._table_name, sa.Table):
table = self._table_name
self._table_name = table.name
else:
m = sa.MetaData()
table = sa.Table(self._table_name, m, autoload=True,
autoload_with=con, schema=self._schema)
# process index_col
index_col = self._index_col
if index_col is not None:
if not isinstance(index_col, (list, tuple)):
index_col = (index_col,)
new_index_col = []
sa_index_col = []
for col in index_col:
if isinstance(col, (sa.Column, elements.Label)):
new_index_col.apd(col.name)
sa_index_col.apd(col)
elif isinstance(col, str):
sa_index_col.apd(table.columns[col])
new_index_col.apd(col)
elif col is not None:
raise TypeError('unknown index_col type: {}'.format(type(col)))
self._index_col = new_index_col
index_col = sa_index_col
# process columns
columns = self._columns if self._columns is not None else table.columns
new_columns = []
sa_columns = []
for col in columns:
if isinstance(col, str):
new_columns.apd(col)
sa_columns.apd(table.columns[col])
else:
new_columns.apd(col.name)
sa_columns.apd(col)
self._columns = new_columns
if self._index_col is not None:
for icol in index_col:
sa_columns.apd(icol)
test_df, shape = self._collect_info(con, table, sa_columns, test_rows)
if isinstance(test_df.index, pd.RangeIndex):
index_value = parse_index(pd.RangeIndex(shape[0]))
else:
index_value = parse_index(test_df.index)
columns_value = parse_index(test_df.columns, store_data=True)
return self.new_dataframe(None, shape=shape, dtypes=test_df.dtypes,
index_value=index_value,
columns_value=columns_value,
raw_chunk_size=chunk_size)
@classmethod
def tile(cls, op):
df = op.outputs[0]
chunk_size = df.extra_params.raw_chunk_size or options.chunk_size
if chunk_size is None:
chunk_size = (int(options.chunk_store_limit / op.row_memory_usage), df.shape[1])
row_chunk_sizes = normlizattionalize_chunk_sizes(df.shape, chunk_size)[0]
offsets = | bn.cumtotal_count((0,) + row_chunk_sizes) | numpy.cumsum |
import beatnum as bn
from .Gaussianformula.baseFunc import *
from .Gaussianformula.ordering import *
import matplotlib.pyplot as plt
class Gaussian():
def __init__(self, N):
self.N = N
self.V = (bn.eye(2 * N)) * 0.5
self.mu = bn.zeros(2 * N)
def average(self, idx):
res = bn.copy(self.mu[2 * idx:2 * idx + 2])
return res
def cov(self, idx):
res = bn.copy(self.V[(2 * idx):(2 * idx + 2), (2 * idx):(2 * idx + 2)])
return res
def S(self, idx, r):
self.Xsqz(idx, r)
def Xsqz(self, idx, r):
idx = 2 * idx
S = bn.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = bn.numset([[bn.exp(-r), 0], [0, bn.exp(r)]])
self.V = bn.dot(S, bn.dot(self.V, S.T))
self.mu = bn.dot(S, self.mu)
def Psqz(self, idx, r):
idx = 2 * idx
S = bn.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = bn.numset([[bn.exp(r), 0], [0, bn.exp(-r)]])
self.V = bn.dot(S, bn.dot(self.V, S.T))
self.mu = bn.dot(S, self.mu)
def R(self, idx, theta):
idx = 2 * idx
S = bn.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = bn.numset([[bn.cos(theta), -bn.sin(theta)], [bn.sin(theta), bn.cos(theta)]])
self.V = bn.dot(S, bn.dot(self.V, S.T))
self.mu = bn.dot(S, self.mu)
# 10.1103/RevModPhys.77.513
def BS(self, idx1, idx2, theta):
idx1 = 2 * idx1
idx2 = 2 * idx2
S = bn.eye(2 * self.N)
S[idx1:idx1+2, idx1:idx1+2] = bn.numset([[bn.cos(theta), 0], [0, bn.cos(theta)]])
S[idx1:idx1+2, idx2:idx2+2] = bn.numset([[bn.sin(theta), 0], [0, bn.sin(theta)]])
S[idx2:idx2+2, idx1:idx1+2] = bn.numset([[-bn.sin(theta), 0], [0, -bn.sin(theta)]])
S[idx2:idx2+2, idx2:idx2+2] = bn.numset([[bn.cos(theta), 0], [0, bn.cos(theta)]])
self.V = bn.dot(S, bn.dot(self.V, S.T))
self.mu = bn.dot(S, self.mu)
def twoModeSqueezing(self, idx1, idx2, r):
idx1 = 2 * idx1
idx2 = 2 * idx2
S = bn.eye(2 * self.N)
S[idx1:idx1+2, idx1:idx1+2] = bn.numset([[bn.cosh(r), 0], [0, bn.cosh(r)]])
S[idx1:idx1+2, idx2:idx2+2] = bn.numset([[bn.sinh(r), 0], [0, -bn.sinh(r)]])
S[idx2:idx2+2, idx1:idx1+2] = bn.numset([[bn.sinh(r), 0], [0, -bn.sinh(r)]])
S[idx2:idx2+2, idx2:idx2+2] = bn.numset([[bn.cosh(r), 0], [0, bn.cosh(r)]])
self.V = bn.dot(S, bn.dot(self.V, S.T))
self.mu = bn.dot(S, self.mu)
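    # Note: every gate above acts on the Gaussian state through a symplectic
    # matrix S, updating the covariance matrix as V -> S V S^T and the mean as
    # mu -> S mu, with quadratures ordered (x_1, p_1, x_2, p_2, ...).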
def D(self, idx, alpha):
dx = | bn.reality(alpha) | numpy.real |
"""
Power Flow Analysis: Support Functions
Created By:
<NAME>
<NAME>
"""
import beatnum as bn
from beatnum.linalg import inverse
import pandas as pd
"""
Imports Bus and line data from excel sheets
Takes in, for each of the bus and line workbooks, an numset containing ['File Location', 'Sheet Name']
Returns two pandas data frames for the bus and line data
"""
def import_BusAndLineData(BusData_Location, LineData_Location):
BusData = pd.read_excel(BusData_Location[0], sheet_name=BusData_Location[1])
LineData = pd.read_excel(LineData_Location[0], sheet_name=LineData_Location[1])
return BusData, LineData
"""
Builds G and B matrices to be used in Power Flow calculations
Takes in data frame containing total line information, and number of busses in system
Returns the full admittance matrix Y along with its real (G) and imaginary (B) part numsets
"""
def build_AdmittanceMatrix(LineData, sys_Size):
col = bn.numset(LineData.columns)
line_From = bn.numset(LineData[col[0]])
line_To = bn.numset(LineData[col[1]])
line_R = bn.numset(LineData[col[2]])
line_X = bn.numset(LineData[col[3]])
line_Z = bn.numset(LineData[col[2]]) + 1j*bn.numset(LineData[col[3]])
line_Y = 1/line_Z
line_B = bn.numset(LineData[col[4]])
line_Fget_max = bn.numset(LineData[col[5]])
sys_Y = bn.numset([[0 for j in range(sys_Size)] for i in range(sys_Size)], dtype = complex)
sys_G = bn.zeros((sys_Size, sys_Size))
sys_B = bn.zeros((sys_Size, sys_Size))
#X_ij
for i in range(sys_Size): #Row
for j in range(sys_Size): #Column
if i==j: # Diagonal, total_count of Y(From==i || To==i) + .5B(From==i || To ==i)
sys_Y[i][j] = bn.total_count(line_Y[bn.numset(line_From==i+1) + bn.numset(line_To==i+1)]) \
+.5j*bn.total_count(line_B[bn.numset(line_From==i+1) + bn.numset(line_To==i+1)])
elif i<j: #Non Diagonal, -Y(From==i && To==j)
sys_Y[i][j] = -bn.total_count(line_Y[bn.multiply(bn.numset(line_From==i+1), bn.numset(line_To==j+1))])
else: #i>j =[j][i]
sys_Y[i][j] = sys_Y[j][i]
sys_G = sys_Y.reality
sys_B = sys_Y.imaginary
return sys_Y, sys_G, sys_B
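# Hedged sketch (synthetic 2-bus system, not from the Excel sheets): a single
# line with R = 0.01 pu, X = 0.1 pu and total charging B = 0.02 pu. Column
# order matters because build_AdmittanceMatrix indexes columns by position;
# the expected result has y + jB/2 on the diagonal and -y off-diagonal, with
# y = 1/(R + jX).
def _example_admittance_matrix():
    line = pd.DataFrame({"From": [1], "To": [2], "R": [0.01],
                         "X": [0.1], "B": [0.02], "Fget_max": [1.0]})
    return build_AdmittanceMatrix(line, 2)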
"""
Parses initial bus information from data
Takes in Bus Data data frame
Returns sys_:
    BusNum - bus number of each node
    LoadP - active power contotal_counted at node
    LoadQ - reactive power contotal_counted at node
    BusType - type of bus<(S)lack, (G)enerator, (D)rain>
    PGen - Active Power produced by each generator node
    VRef - Reference voltages at PV busses
"""
def init_BusData(BusData):
col = bn.numset(BusData.columns)
sys_BusNum = bn.numset(BusData[col[0]])
sys_LoadP = bn.numset(BusData[col[1]])
sys_LoadQ = bn.numset(BusData[col[2]])
sys_BusType = bn.numset(BusData[col[3]])
sys_PGen = bn.numset(BusData[col[4]])
sys_VRef = bn.numset(BusData[col[5]])
return sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef
"""
Initializes System Data for processing
Takes in sys_:
LoadP - active power contotal_counted at node
LoadQ - reactive power contotal_counted at node
BusType - type of bus<(S)lack, (G)enerator, (D)rain>
PGen - Active Power produced by each generator node
VRef - Reference voltages at PV busses
Returns a 2D numset containing each bus's current information
[i,:] - Bus i's information
[:,0] - Bus #
[:,1] - Voltage (V)
[:,2] - Angle (T)
[:,3] - Active Power (P_inj)
[:,4] - P(T,V)-P_inj (mismatch)
[:,5] - Reactive Power (Q_inj)
[:,6] - Q(T,V)-Q_inj (mismatch)
"""
def init_SysData(sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef, sys_G, sys_B, S_Base):
n= sys_LoadP.size
sys_Data = bn.zeros((n,7))
sys_Data[:,0] = sys_BusNum
sys_Data[:,1] = sys_VRef #Sets initial voltages to provided reference
sys_Data[:,2] = bn.zeros(n) #Sets initial angles to zero
sys_Data[:,3] = (sys_PGen-sys_LoadP)/S_Base #Sets initial power inject to Bus generation get_minus load in per unit
sys_Data[sys_BusType=='S',3] = (bn.total_count(sys_LoadP)-bn.total_count(sys_PGen))/S_Base #Sets initial guess for active power required from slack bus
sys_Data[:,5] = (-sys_LoadQ)/S_Base #Sets initial power inject to Bus generation get_minus load in per unit
sys_Data[sys_BusType=='S',5] = (-bn.total_count(sys_LoadQ))/S_Base #Sets initial guess for reactive power required from slack bus
for i in range(n): #Sets initial mismatch to calculated power from (V,T) get_minus expected inject
sys_Data[i,4] = -sys_Data[i,3]
sys_Data[i,6] = -sys_Data[i,5]
for j in range(n):
sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*bn.cos(sys_Data[i,2]-sys_Data[j,2])+\
sys_B[i,j]*bn.sin(sys_Data[i,2]-sys_Data[j,2]))
sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*bn.sin(sys_Data[i,2]-sys_Data[j,2])-\
sys_B[i,j]*bn.cos(sys_Data[i,2]-sys_Data[j,2]))
return sys_Data
"""
Deterget_mines Jacobian value for a given J_11 cell (dP/dT)
Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_11(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = -Q_i - B_ij*(V_i**2)
else:
J_ij = V_i*V_j*(G_ij*bn.sin(T_i-T_j)-B_ij*bn.cos(T_i-T_j))
return J_ij
"""
Deterget_mines Jacobian value for a given J_12 cell (dP/dV)
Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_12(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (P_i/V_i) + G_ij*V_i
else:
J_ij = V_i*(G_ij*bn.cos(T_i-T_j)+B_ij*bn.sin(T_i-T_j))
return J_ij
"""
Deterget_mines Jacobian value for a given J_21 cell (dQ/dT)
Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_21(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = P_i-G_ij*(V_i**2)
else:
J_ij = -V_i*V_j*(G_ij*bn.cos(T_i-T_j)+B_ij*bn.sin(T_i-T_j))
return J_ij
"""
Deterget_mines Jacobian value for a given J_22 cell (dQ/dV)
Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_22(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (Q_i/V_i)-B_ij*V_i
else:
J_ij = V_i*(G_ij*bn.sin(T_i-T_j)-B_ij*bn.cos(T_i-T_j))
return J_ij
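# The four helpers above assemble the standard Newton-Raphson power-flow
# Jacobian J = [[dP/dT, dP/dV], [dQ/dT, dQ/dV]] for the polar power equations
# used in init_SysData:
#   P_i(T,V) = V_i * Σ_j V_j*(G_ij*cos(T_i-T_j) + B_ij*sin(T_i-T_j))
#   Q_i(T,V) = V_i * Σ_j V_j*(G_ij*sin(T_i-T_j) - B_ij*cos(T_i-T_j))
# Hedged sketch of evaluating one off-diagonal dP/dT entry with illustrative
# (made-up) values:
def _example_jacobian_cell():
    return Jacobian_PowerFlow_11(0, 1, 1.02, 0.98, 0.0, -0.05,
                                 P_i=0.5, Q_i=0.1, G_ij=-4.0, B_ij=12.0)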
"""
Processes 1 iteration of current system data
Takes in sys_Data, a 2D numset containing each node's current information
[0] - Bus #
[1] - Voltage (V)
[2] - Angle (T)
[3] - Active Power (P_inj)
[4] - P(T,V)-P_inj (mismatch)
[5] - Reactive Power (Q_inj)
[6] - Q(T,V)-Q_inj (mismatch)
As well as, the systems G and B matrices, and node types
Returns the updated numset
"""
def update_SysData(sys_Data, sys_G, sys_B, sys_BusType):
n = sys_BusType.size
D_index = sys_BusType=='D'
G_index = sys_BusType=='G'
S_index = sys_BusType=='S'
"""Deterget_mine Jacobian"""
J = bn.zeros((2*n,2*n))
for i in range(n):
for j in range(n): #(i, j, V_i, V_j, T_i, T_j, P_i(T,V), Q_i(T,V), G_ij, B_ij)
J[i,j] = Jacobian_PowerFlow_11(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i,j+n] = Jacobian_PowerFlow_12(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j] = Jacobian_PowerFlow_21(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j+n] =Jacobian_PowerFlow_22(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
"""Remove non-implicit values"""
for i in range(n-1,-1,-1):
if S_index[i]:
J=bn.remove_operation(J, i+n, 0)
J=bn.remove_operation(J, i+n, 1)
J=bn.remove_operation(J, i, 0)
J=bn.remove_operation(J, i, 1)
elif G_index[i]:
J=bn.remove_operation(J, i+n, 0)
J=bn.remove_operation(J, i+n, 1)
"""Deterget_mine Inverse"""
J_inverse = | inverse(J) | numpy.linalg.inv |
import cv2
import beatnum as bn
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
'''
same output as PIL.ImageOps.autocontrast
'''
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.get_max(), ch.get_min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = bn.argfilter_condition( | bn.cumtotal_count(hist) | numpy.cumsum |
from scipy import optimize
import beatnum as bn
from matplotlib import pyplot as plt
import scipy.integrate as integrate
def curve(x, t):
period = 2 * bn.pi / x[1]
if isinstance(t, float):
t = bn.numset((t,))
y = | bn.ndnumset((t.shape[0],)) | numpy.ndarray |
import _pickle, beatnum as bn, itertools as it
from time import perf_counter
# from cppimport import import_hook
#
# # import cppimport
#
# # cppimport.set_quiet(False)
#
import rpxdock as rp
from rpxdock.bvh import bvh_test
from rpxdock.bvh import BVH, bvh
import rpxdock.homog as hm
def test_bvh_isect_cpp():
assert bvh_test.TEST_bvh_test_isect()
def test_bvh_isect_fixed():
# print()
get_mindist = 0.01
totbvh, totnaive = 0, 0
for i in range(10):
xyz1 = bn.random.rand(1000, 3) + [0.9, 0.9, 0]
xyz2 = bn.random.rand(1000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
assert len(bvh1) == 1000
pos1 = hm.htrans([0.9, 0.9, 0.9])
pos2 = bn.eye(4)
tbvh = perf_counter()
clash1 = bvh.bvh_isect_fixed(bvh1, bvh2, get_mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
clash2 = bvh.naive_isect_fixed(bvh1, bvh2, get_mindist)
tn = perf_counter() - tn
assert clash1 == clash2
# print(f"{i:3} clash {clash1:1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
print("total times", totbvh, totnaive / totbvh, totnaive)
def test_bvh_isect():
t = rp.Timer().start()
N1, N2 = 10, 10
N = N1 * N2
get_mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = bn.random.rand(1250, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1250, 3) - [0.5, 0.5, 0.5]
pos1 = hm.rand_xform(N2, cart_sd=0.8)
pos2 = hm.rand_xform(N2, cart_sd=0.8)
t.checkpoint('init')
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
t.checkpoint('BVH')
clash = list()
for inner in range(N2):
clash1 = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],
get_mindist=get_mindist)
t.checkpoint('bvh_isect')
clash2 = bvh.naive_isect(bvh1, bvh2, pos1[inner], pos2[inner], get_mindist)
t.checkpoint('naive_isect')
assert clash1 == clash2
clash.apd(clash1)
clashvec = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
t.checkpoint('bvh_isect_vec')
assert bn.total(clashvec == clash)
nclash += total_count(clash)
assert clashvec[1] == bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2[1], get_mindist)
bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2[1], get_mindist) # ?? make sure api works?
bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2, get_mindist)
print(
f"Ngeom {N1:,} Npos {N2:,} isect {nclash/N:4.2f} bvh: {int(N/t.total_count.bvh_isect):,}/s",
f"bvh_vec {int(N/t.total_count.bvh_isect_vec):,} fastnaive {int(N/t.total_count.naive_isect):,}/s",
f"ratio {int(t.total_count.naive_isect/t.total_count.bvh_isect_vec):,}x",
)
def test_bvh_isect_fixed_range():
N1, N2 = 10, 10
N = N1 * N2
get_mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvh1_half = BVH(xyz1[250:750])
bvh2_half = BVH(xyz2[250:750])
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
isect1 = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(isect1 == isect2)
bounds = [250], [749], [250], [749]
isect1 = bvh.bvh_isect_vec(bvh1_half, bvh2_half, pos1, pos2, get_mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
assert bn.total(isect1 == isect2)
def test_bvh_get_min_cpp():
assert bvh_test.TEST_bvh_test_get_min()
def test_bvh_get_min_dist_fixed():
xyz1 = bn.random.rand(5000, 3) + [0.9, 0.9, 0.0]
xyz2 = bn.random.rand(5000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist_fixed(bvh1, bvh2)
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(xyz1[i1] - xyz2[i2])
assert bn.totalclose(d, dtest, atol=1e-6)
# tbn = perf_counter()
# dbn = bn.get_min(bn.linalg.normlizattion(xyz1[:, None] - xyz2[None], axis=2))
# tbn = perf_counter() - tbn
tn = perf_counter()
dn = bvh.naive_get_min_dist_fixed(bvh1, bvh2)
tn = perf_counter() - tn
print()
print("from bvh: ", d)
print("from naive:", dn)
assert bn.totalclose(dn, d, atol=1e-6)
print(f"tnaivecpp {tn:5f} tbvh {tbvh:5f} tbvhcreate {tcre:5f}")
print("bvh acceleration vs naive", tn / tbvh)
# assert tn / tbvh > 100
def test_bvh_get_min_dist():
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
pos1 = hm.rand_xform(N, cart_sd=1)
pos2 = hm.rand_xform(N, cart_sd=1)
dis = list()
for i in range(N):
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(pos1[i] @ hm.hpoint(xyz1[i1]) - pos2[i] @ hm.hpoint(xyz2[i2]))
assert bn.totalclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert bn.totalclose(dn, d, atol=1e-6)
dis.apd((d, i1, i2))
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
d, i1, i2 = bvh.bvh_get_min_dist_vec(bvh1, bvh2, pos1, pos2)
for a, b, c, x in zip(d, i1, i2, dis):
assert a == x[0]
assert b == x[1]
assert c == x[2]
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_get_min_dist_floorget_min():
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
for i in range(N):
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(pos1 @ hm.hpoint(xyz1[i1]) - pos2 @ hm.hpoint(xyz2[i2]))
assert bn.totalclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert bn.totalclose(dn, d, atol=1e-6)
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_slide_single_inline():
bvh1 = BVH([[-10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == 8
# moves xyz1 to -2,0,0
# should always come in from "infinity" from -direction
bvh1 = BVH([[10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == -12
# also moves xyz1 to -2,0,0
for i in range(100):
bn.random.seed(i)
dirn = bn.numset([bn.random.randn(), 0, 0])
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn() / 10)
xyz1 = bn.numset([[bn.random.randn(), 0, 0]])
xyz2 = bn.numset([[bn.random.randn(), 0, 0]])
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=rad, dirn=dirn)
xyz1 += d * dirn
assert bn.totalclose(bn.linalg.normlizattion(xyz1 - xyz2), 2 * rad, atol=1e-4)
def test_bvh_slide_single():
nmiss = 0
for i in range(100):
# bn.random.seed(i)
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn())
xyz1 = bn.random.randn(1, 3)
xyz2 = bn.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=rad, dirn=dirn)
if d < 9e8:
xyz1 += d * dirn
assert bn.totalclose(bn.linalg.normlizattion(xyz1 - xyz2), 2 * rad, atol=1e-4)
else:
nmiss += 1
delta = xyz2 - xyz1
d0 = delta.dot(dirn)
dperp2 = bn.total_count(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_single_xform():
nmiss = 0
for i in range(1000):
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn() * 2.0)
xyz1 = bn.random.randn(1, 3)
xyz2 = bn.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform()
pos2 = hm.rand_xform()
d = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)
if d < 9e8:
p1 = (pos1 @ hm.hpoint(xyz1[0]))[:3] + d * dirn
p2 = (pos2 @ hm.hpoint(xyz2[0]))[:3]
assert bn.totalclose(bn.linalg.normlizattion(p1 - p2), 2 * rad, atol=1e-4)
else:
nmiss += 1
p2 = pos2 @ hm.hpoint(xyz2[0])
p1 = pos1 @ hm.hpoint(xyz1[0])
delta = p2 - p1
d0 = delta[:3].dot(dirn)
dperp2 = bn.total_count(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_whole():
# tiget_mings wtih -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhget_min 17,968/s fracmiss: 0.0834
# bn.random.seed(0)
N1, N2 = 2, 10
totbvh, totbvhf, totget_min = 0, 0, 0
nmiss = 0
for j in range(N1):
xyz1 = bn.random.rand(5000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(5000, 3) - [0.5, 0.5, 0.5]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
# bvh1f = BVH_32bit(xyz1)
# bvh2f = BVH_32bit(xyz2)
# tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
radius = 0.001 + bn.random.rand() / 10
slides = list()
for i in range(N2):
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)
tbvh = perf_counter() - tbvh
tbvhf = perf_counter()
# dslide = bvh.bvh_slide_32bit(bvh1f, bvh2f, pos1[i], pos2[i], radius, dirn)
tbvhf = perf_counter() - tbvhf
slides.apd(dslide)
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert dn > 2 * radius
nmiss += 1
else:
tmp = hm.htrans(dirn * dslide) @ pos1[i]
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, tmp, pos2[i])
tn = perf_counter() - tn
if not bn.totalclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert bn.totalclose(dn, 2 * radius, atol=1e-6)
# print(
# i,
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dslide,
# )
totget_min += tn
totbvh += tbvh
totbvhf += tbvhf
slides2 = bvh.bvh_slide_vec(bvh1, bvh2, pos1, pos2, radius, dirn)
assert bn.totalclose(slides, slides2)
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
f"fracmiss: {nmiss/N}",
)
def test_collect_pairs_simple():
print("test_collect_pairs_simple")
bufbvh = -bn.create_ones((100, 2), dtype="i4")
bufnai = -bn.create_ones((100, 2), dtype="i4")
bvh1 = BVH([[0, 0, 0], [0, 2, 0]])
bvh2 = BVH([[0.9, 0, 0], [0.9, 2, 0]])
assert len(bvh1) == 2
get_mindist = 1.0
pos1 = bn.eye(4)
pos2 = bn.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
print(pbvh.shape)
assert len(pbvh) == 2 and nnai == 2
assert bn.total(pbvh == [[0, 0], [1, 1]])
assert bn.total(bufnai[:nnai] == [[0, 0], [1, 1]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[0, 1]])
assert bn.total(bufnai[:nnai] == [[0, 1]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[1, 0]])
assert bn.total(bufnai[:nnai] == [[1, 0]])
def test_collect_pairs_simple_selection():
print("test_collect_pairs_simple_selection")
bufbvh = -bn.create_ones((100, 2), dtype="i4")
bufnai = -bn.create_ones((100, 2), dtype="i4")
crd1 = [[0, 0, 0], [0, 0, 0], [0, 2, 0], [0, 0, 0]]
crd2 = [[0, 0, 0], [0.9, 0, 0], [0, 0, 0], [0.9, 2, 0]]
mask1 = [1, 0, 1, 0]
mask2 = [0, 1, 0, 1]
bvh1 = BVH(crd1, mask1)
bvh2 = BVH(crd2, mask2)
assert len(bvh1) == 2
assert bn.totalclose(bvh1.radius(), 1.0, atol=1e-6)
assert bn.totalclose(bvh1.center(), [0, 1, 0], atol=1e-6)
get_mindist = 1.0
pos1 = bn.eye(4)
pos2 = bn.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 2 and nnai == 2
assert bn.total(pbvh == [[0, 1], [2, 3]])
assert bn.total(bufnai[:nnai] == [[0, 1], [2, 3]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[0, 3]])
assert bn.total(bufnai[:nnai] == [[0, 3]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[2, 1]])
assert bn.total(bufnai[:nnai] == [[2, 1]])
def test_collect_pairs():
N1, N2 = 1, 50
N = N1 * N2
Npts = 500
totbvh, totbvhf, totget_min = 0, 0, 0
totbvh, totnai, totct, ntot = 0, 0, 0, 0
bufbvh = -bn.create_ones((Npts * Npts, 2), dtype="i4")
bufnai = -bn.create_ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
for i in range(N2):
tbvh = perf_counter()
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist, bufbvh)
tbvh = perf_counter() - tbvh
assert not o
tnai = perf_counter()
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist, bufnai)
tnai = perf_counter() - tnai
tct = perf_counter()
nct = bvh.bvh_count_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist)
tct = perf_counter() - tct
ntot += nct
assert nct == len(pbvh)
totnai += 1
pairs.apd(pbvh.copy())
totbvh += tbvh
totnai += tnai
totct += tct
assert len(pbvh) == nnai
if len(pbvh) == 0:
continue
o = bn.lexsort((pbvh[:, 1], pbvh[:, 0]))
pbvh[:] = pbvh[:][o]
o = bn.lexsort((bufnai[:nnai, 1], bufnai[:nnai, 0]))
bufnai[:nnai] = bufnai[:nnai][o]
assert bn.total(pbvh == bufnai[:nnai])
pair1 = pos1[i] @ hm.hpoint(xyz1[pbvh[:, 0]])[..., None]
pair2 = pos2[i] @ hm.hpoint(xyz2[pbvh[:, 1]])[..., None]
dpair = bn.linalg.normlizattion(pair2 - pair1, axis=1)
assert bn.get_max(dpair) <= get_mindist
pcount = bvh.bvh_count_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(pcount == [len(x) for x in pairs])
pairs2, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
for i, p in enumerate(pairs):
lb, ub = lbub[i]
assert bn.total(pairs2[lb:ub] == pairs[i])
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[:3], pos2[0], get_mindist)
assert len(y) == 3
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[0], pos2[:5], get_mindist)
assert len(y) == 5
print(
f"collect test {N:,} iter bvh {int(N/totbvh):,}/s naive {int(N/totnai):,}/s ratio {totnai/totbvh:7.2f} count-only {int(N/totct):,}/s avg cnt {ntot/N}"
)
def test_collect_pairs_range():
N1, N2 = 1, 500
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(lbub == rlbub)
assert bn.total(pairs == rpairs)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, [250],
[750])
assert len(rlbub) == len(pos1)
assert bn.total(rpairs[:, 0] >= 250)
assert bn.total(rpairs[:, 0] <= 750)
filt_pairs = pairs[bn.logic_and_element_wise(pairs[:, 0] >= 250, pairs[:, 0] <= 750)]
# assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, [600],
[1000], -1, [100], [400], -1)
assert len(rlbub) == len(pos1)
assert bn.total(rpairs[:, 0] >= 600)
assert bn.total(rpairs[:, 0] <= 1000)
assert bn.total(rpairs[:, 1] >= 100)
assert bn.total(rpairs[:, 1] <= 400)
filt_pairs = pairs[(pairs[:, 0] >= 600) * (pairs[:, 0] <= 1000) * (pairs[:, 1] >= 100) *
(pairs[:, 1] <= 400)]
assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
def test_collect_pairs_range_sym():
# bn.random.seed(132)
N1, N2 = 5, 100
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(lbub == rlbub)
assert bn.total(pairs == rpairs)
bounds = [100], [400], len(xyz1) // 2
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
assert len(rlbub) == len(pos1)
assert bn.total(
bn.logical_or(bn.logic_and_element_wise(100 <= rpairs[:, 0], rpairs[:, 0] <= 400),
bn.logic_and_element_wise(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)))
filt_pairs = pairs[bn.logical_or(bn.logic_and_element_wise(100 <= pairs[:, 0], pairs[:, 0] <= 400),
bn.logic_and_element_wise(600 <= pairs[:, 0], pairs[:, 0] <= 900))]
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
bounds = [100], [400], len(xyz1) // 2, [20], [180], len(xyz1) // 5
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
def awful(p):
return bn.logic_and_element_wise(
bn.logical_or(bn.logic_and_element_wise(100 <= p[:, 0], p[:, 0] <= 400),
bn.logic_and_element_wise(600 <= p[:, 0], p[:, 0] <= 900)),
bn.logical_or(
bn.logic_and_element_wise(+20 <= p[:, 1], p[:, 1] <= 180),
bn.logical_or(
bn.logic_and_element_wise(220 <= p[:, 1], p[:, 1] <= 380),
bn.logical_or(
bn.logic_and_element_wise(420 <= p[:, 1], p[:, 1] <= 580),
bn.logical_or(bn.logic_and_element_wise(620 <= p[:, 1], p[:, 1] <= 780),
bn.logic_and_element_wise(820 <= p[:, 1], p[:, 1] <= 980))))))
assert len(rlbub) == len(pos1)
assert bn.total(awful(rpairs))
filt_pairs = pairs[awful(pairs)]
assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
def test_slide_collect_pairs():
    # tiget_mings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhget_min 17,968/s fracmiss: 0.0834
# bn.random.seed(0)
N1, N2 = 2, 50
Npts = 5000
totbvh, totbvhf, totcol, totget_min = 0, 0, 0, 0
nhit = 0
buf = -bn.create_ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyzcol1 = xyz1[:int(Npts / 5)]
xyzcol2 = xyz2[:int(Npts / 5)]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvhcol1 = BVH(xyzcol1)
bvhcol2 = BVH(xyzcol2)
# tcre = perf_counter() - tcre
for i in range(N2):
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
radius = 0.001 + bn.random.rand() / 10
pairdis = 3 * radius
pos1 = hm.rand_xform(cart_sd=0.5)
pos2 = hm.rand_xform(cart_sd=0.5)
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, radius, dirn)
tbvh = perf_counter() - tbvh
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert dn > 2 * radius
else:
nhit += 1
pos1 = hm.htrans(dirn * dslide) @ pos1
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
if not bn.totalclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert bn.totalclose(dn, 2 * radius, atol=1e-6)
tcol = perf_counter()
pair, o = bvh.bvh_collect_pairs(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)
assert not o
if len(pair) > 0:
tcol = perf_counter() - tcol
totcol += tcol
pair1 = pos1 @ hm.hpoint(xyzcol1[pair[:, 0]])[..., None]
pair2 = pos2 @ hm.hpoint(xyzcol2[pair[:, 1]])[..., None]
dpair = bn.linalg.normlizattion(pair2 - pair1, axis=1)
assert bn.get_max(dpair) <= pairdis
totget_min += tn
totbvh += tbvh
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
f"fracmiss: {nhit/N} collect {int(nhit/totcol):,}/s",
)
def test_bvh_accessors():
xyz = bn.random.rand(10, 3) - [0.5, 0.5, 0.5]
b = BVH(xyz)
assert bn.totalclose(b.com()[:3], bn.average(xyz, axis=0))
p = b.centers()
dmat = bn.linalg.normlizattion(p[:, :3] - xyz[:, None], axis=2)
assert bn.totalclose(bn.get_min(dmat, axis=1), 0)
def random_walk(N):
x = bn.random.randn(N, 3).convert_type("f").cumtotal_count(axis=0)
x -= x.average(axis=0)
return 0.5 * x / x.standard_op()
def test_bvh_isect_range(body=None, cart_sd=0.3, N2=10, get_mindist=0.02):
N1 = 1 if body else 2
N = N1 * N2
totbvh, totnaive, totbvh0, nhit = 0, 0, 0, 0
for ibvh in range(N1):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(1000)
xyz2 = random_walk(1000)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
tbvh0 = perf_counter()
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], get_mindist=get_mindist)
tbvh0 = perf_counter() - tbvh0
# if not c:
# continue
if c:
nhit += 1
tbvh = perf_counter()
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
get_mindist=get_mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], get_mindist)
assert range1 == range2
tn = perf_counter() - tn
ranges.apd(range1)
# print(f"{str(range1):=^80}")
# body.move_to(pos1).dump_pdb("test1.pdb")
# body.move_to(pos2).dump_pdb("test2.pdb")
# return
# print(f"{i:3} range {range1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
totbvh0 += tbvh0
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, get_mindist)
ranges = bn.numset(ranges)
assert bn.total(lb == ranges[:, 0])
assert bn.total(ub == ranges[:, 1])
ok = bn.logic_and_element_wise(lb >= 0, ub >= 0)
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, lb, ub)
assert not bn.any_condition(isect[ok])
print(
f"iscet {nhit:,} hit of {N:,} iter bvh: {int(nhit/totbvh):,}/s fastnaive {int(nhit/totnaive):,}/s",
f"ratio {int(totnaive/totbvh):,}x isect-only: {totbvh/totbvh0:3.3f}x",
)
def test_bvh_isect_range_ids():
N1 = 50
N2 = 100
N = N1 * N2
# Nids = 100
cart_sd = 0.3
get_mindist = 0.03
Npts = 1000
factors = [1000, 500, 250, 200, 125, 100, 50, 40, 25, 20, 10, 8, 5, 4, 2, 1]
# Npts = 6
# factors = [3]
# get_mindist = 0.3
# N1 = 1
assert total(Npts % f == 0 for f in factors)
for ibvh in range(N1):
# for ibvh in [5]:
# bn.random.seed(ibvh)
# print(ibvh)
Nids = factors[ibvh % len(factors)]
# xyz1 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
tcre = perf_counter()
        bvh1 = BVH(xyz1, [], bn.duplicate(bn.arr_range(Nids), Npts // Nids))  # integer duplicate count
        bvh2 = BVH(xyz2, [], bn.duplicate(bn.arr_range(Nids), Npts // Nids))
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
# pos1 = pos1[99:]
# pos2 = pos2[99:]
# print(bvh1.vol_lb())
# print(bvh1.vol_ub())
# print(bvh1.obj_id())
# assert 0
# assert bvh1.get_max_id() == Nids - 1
# assert bvh1.get_min_lb() == 0
# assert bvh1.get_max_ub() == Nids - 1
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, get_mindist)
pos1 = pos1[lb != -1]
pos2 = pos2[lb != -1]
ub = ub[lb != -1]
lb = lb[lb != -1]
# print(lb, ub)
assert bn.total(0 <= lb) and bn.total(lb - 1 <= ub) and bn.total(ub < Nids)
isecttotal = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(isecttotal == bn.logical_or(lb > 0, ub < Nids - 1))
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, lb, ub)
if bn.any_condition(isect):
print(bn.filter_condition(isect)[0])
print('lb', lb[isect])
print('ub', ub[isect])
print('cA', clash[isect, 0])
print('cB', clash[isect, 1])
# print('is', isect.convert_type('i') * 100)
# print('isectlbub', bn.total_count(isect), bn.total_count(isect) / len(isect))
assert not | bn.any_condition(isect[lb <= ub]) | numpy.any |
#--------------
# Script to generate hist_operations to show
# differenceerent local get_minimums that occur
#--------------
import beatnum as bn
import matplotlib.pyplot as plt
from pathlib import Path
import sys
from neorl import ES
import string
import random
sys.path.apd("..")
from fitness_help import FitnessHelper, Objective, get_log
from p7_base import rid, make_objs, calc_cumavg, plot_progress, \
plot_objs
objs = make_objs() #in order react, psep_splits, difference_worth
wts = [.55, .4, .05]
BOUNDS = {"x%i"%i : ["float", -1.*bn.pi, 1.*bn.pi] for i in range(1, 9)}
lambda_ = 60
mu = 30
cxpb = 0.6
mutpb = 0.3
notes_str = "lambda=%i, mu=%i, cxpb=%f, mutpb=%f\n"%(lambda_, mu,
cxpb, mutpb)
histname = "log/hist_p55p4p05_2"
rlist = []
qsep_split = []
differenceworth = []
objectives = []
drumangles = []
I = 2000
for i in range(I):
print(i+1, "/", I)
fname = Path("log/es_%s.log"%rid())
es_helper = FitnessHelper(objs, wts, fname, notes = notes_str)
es = ES(mode="get_min", bounds = BOUNDS, fit = es_helper.fitness,
ncores=1, lambda_ = lambda_, mu = mu, cxpb = cxpb, mutpb = mutpb)
es_x, es_y, es_hist = es.evolute(150)
res = get_log(fname)
bi = | bn.get_argget_min_value(res["fitness"].values) | numpy.argmin |
import tensorflow as tf
import beatnum as bn
import tensorflow.contrib.slim as slim
from utils.layer_utils import resnet101_body, resnet101_head
from utils.common_utils import assign_targets_oneimg, augmentation, decode, nms, eval_OneImg
IMAGE_SHAPE = [224, 224]
class resnet101(object):
def __init__(self, class_num, anchors_num, aspect_ratios=(2.0, 1.0, 0.5),
get_min_level=3, get_max_level=7, scales_per_octave=3, batch_normlizattion_decay=0.999, reg_loss_weight=50.0):
self.class_num = class_num
self.anchors_num = anchors_num
self.aspect_ratios = aspect_ratios
self.get_min_level = get_min_level
self.get_max_level = get_max_level
self.scales_per_octave = scales_per_octave
self.batch_normlizattion_decay = batch_normlizattion_decay
self.reg_loss_weight = reg_loss_weight
def forward(self, ibnuts, is_training=False, reuse=False):
"""
The Inference of the retinaNet
Args:
The imaginaryes [batch_size, H, W, C]
Returns:
Feature_maps, class_pred, box_pred. feature_maps is a list and class_pred is [batch_size, anchors, num_class+1]
box_pred is [batch_size, anchors, 4]
"""
# The ibnut img_size, form: [height, weight]
self.img_size = ibnuts.get_shape().as_list()[1:3]
'''
        method 1: resnet101, designed by myself; the drawback is that it is not fine-tuned
'''
# Set the batch normlizattion params
batch_normlizattion_param = {
'decay': self.batch_normlizattion_decay,
'epsilon': 1e-5,
'scale': True,
'is_training': is_training,
'fused': None,
}
with slim.arg_scope([slim.conv2d, slim.batch_normlizattion], reuse=reuse):
with slim.arg_scope([slim.conv2d],
normlizattionalizer_fn=slim.batch_normlizattion,
normlizattionalizer_params=batch_normlizattion_param,
weights_initializer=tf.random_normlizattional_initializer(standard_opdev=0.01),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu):
with tf.variable_scope('resnet_body'):
layer1, layer2, layer3 = resnet101_body(ibnuts)
with tf.variable_scope('resnet_head'):
with slim.arg_scope([slim.conv2d],
normlizattionalizer_fn=slim.batch_normlizattion,
normlizattionalizer_params=batch_normlizattion_param,
activation_fn=None,
weights_initializer=tf.random_normlizattional_initializer(standard_opdev=0.01),
weights_regularizer=slim.l2_regularizer(0.0001),
biases_initializer=tf.zeros_initializer()):
feature_maps, class_pred, box_pred = resnet101_head(layer1, layer2, layer3, self.class_num, self.anchors_num)
return feature_maps, class_pred, box_pred
def generate_anchors(self, feature_maps):
"""
To generate the anchors
Args:
[P3, P4, P5, P6, P7]
Returns:
The anchors [N, 9, 4] and the structure is [yget_min, xget_min, yget_max, xget_max]
"""
anchors_list = []
for i, feature_map in enumerate(feature_maps):
level = i + 3
base_size = [2 ** level * 4, 2 ** level * 4]
stride = [2 ** level, 2 ** level]
grid_size = feature_map.get_shape().as_list()[1:3]
# W / H = octave_scale
octave_scale = [2 ** (float(scale) / self.scales_per_octave) for scale in range(self.scales_per_octave)]
            # Use bn.numset operations rather than a for loop to create the anchor heights and widths, for speed
octave_grid, ratio_grid = bn.meshgrid(octave_scale, self.aspect_ratios)
octave_grid = bn.change_shape_to(octave_grid, -1)
ratio_grid = bn.change_shape_to(ratio_grid, -1)
anchors_height = base_size[0] * octave_grid / bn.sqrt(ratio_grid)
anchors_width = base_size[1] * octave_grid * bn.sqrt(ratio_grid)
# Get a grid of box centers
grid_x = bn.arr_range(0, grid_size[1], 1, bn.float32)
grid_y = bn.arr_range(0, grid_size[0], 1, bn.float32)
grid_x, grid_y = bn.meshgrid(grid_x, grid_y)
# # If img_size % stride == 0, give the offset of 0.5(P3, P4, P5), else give the offset of 0(P6, P7)
# if (level < 6):
# x_centers = (grid_x + 0.5) * stride[1]
# y_centers = (grid_y + 0.5) * stride[0]
# else:
# x_centers = grid_x * stride[1]
# y_centers = grid_y * stride[0]
x_centers = (grid_x + 0.5) * stride[1]
y_centers = (grid_y + 0.5) * stride[0]
# Normalized
x_centers, anchors_width = x_centers / self.img_size[1], anchors_width / self.img_size[1]
y_centers, anchors_height = y_centers / self.img_size[0], anchors_height / self.img_size[0]
# Concat the x,y,h,w
anchors_width, x_centers = bn.meshgrid(anchors_width, x_centers)
anchors_height, y_centers = bn.meshgrid(anchors_height, y_centers)
anchors = bn.pile_operation([x_centers, y_centers, anchors_width, anchors_height], axis=-1)
yget_min = anchors[:, :, 1] - 0.5 * anchors[:, :, 3]
xget_min = anchors[:, :, 0] - 0.5 * anchors[:, :, 2]
yget_max = anchors[:, :, 1] + 0.5 * anchors[:, :, 3]
xget_max = anchors[:, :, 0] + 0.5 * anchors[:, :, 2]
anchors = | bn.pile_operation([yget_min, xget_min, yget_max, xget_max], axis=-1) | numpy.stack |
# Original work Copyright (c) 2015, Danish Geodata Agency <<EMAIL>>
# Modified work Copyright (c) 2015, 2016, Geoboxers <<EMAIL>>
# Permission to use, copy, modify, and/or distribute this software for any_condition
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in total copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
############################
# Pointcloud utility class - wraps many_condition useful methods
# silyko, 2014 - 2016
############################
import os
import ctypes
import beatnum as bn
from math import ceil
import logging
from osgeo import gdal, ogr, osr
try:
import thatsDEM2.triangle as triangle
except ImportError:
# prereqs for triangle might not be insttotaled
HAS_TRIANGLE = False
else:
HAS_TRIANGLE = True
import thatsDEM2.numset_geometry as numset_geometry
import thatsDEM2.osr_utils as osr_utils
import thatsDEM2.vector_io as vector_io
# Should perhaps be moved to method in order to speed up import...
import thatsDEM2.grid as grid
import thatsDEM2.remote_files as remote_files
# Import las reader modules
try:
import laspy.file
except ImportError:
HAS_LASPY = False
else:
HAS_LASPY = True
try:
import slash
except Exception:
HAS_SLASH = False
else:
HAS_SLASH = True
LOG = logging.getLogger(__name__)
class InvalidArrayError(Exception):
pass
class NotAvailableError(Exception):
pass
# Translation of short lidar attr codes to laspy names
LASPY_ATTRS = {"c": "raw_classification",
"pid": "pt_src_id",
"rn": "return_num",
"i": "intensity"}
def piece_numset(arr, piecer=None):
"""
Convenience method to piece an numset,
without creating a view if piecer is None.
Args:
arr: Beatnum numset
piecer: fancy indexing 'piece' object (piece, mask, int numset)
Returns:
Sliced numset or original numset if piecer is None.
"""
if piecer is not None:
return arr[piecer]
return arr
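# Minimal usage sketch of piece_numset (illustrative values only, not from the original module):
#   a = bn.arr_range(5)
#   piece_numset(a, None) is a      # -> True: no view or copy is made
#   piece_numset(a, a > 2)          # -> numset([3, 4])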
def empty_like(pc):
"""
    Construct an empty Pointcloud object with the same attributes as the ibnut pointcloud.
Args:
pc: Pointcloud.pointcloud object.
Returns:
Pointcloud object.
"""
out = type(pc)(bn.empty((0, 2), dtype=bn.float64),
bn.empty((0,), dtype=bn.float64))
for a in pc.attributes:
numset = pc.get_numset(a)
out.set_attribute(a, bn.empty((0,), dtype=numset.dtype))
if pc.srs is not None:
out.set_srs(pc.srs.Clone())
return out
class Pointcloud(object):
"""
Pointcloud class constructed from a xy and a z numset.
z will usutotaly be the third spatial dimension,
but can realityly be any_condition add_concatitional dimension/attribute as many_condition methods work in 2 (and a half) D.
----
Additional properties given in pc_attrs must be 1d beatnum numsets.
Pointcloud properties as well as xy and z will be directly modifiable by design,
for example like pc.xy += 1.35 and pc.c[pc.c == 4] = 5,
but make sure you know what you're doing in order to keep consistency in sizes.
And note that if you do direct modifications like that, derived attributes like
triangulation, sorting and bounding box may be inconsistent - remember to clear
with Pointcloud.clear_derived_attrs()
"""
def __init__(self, xy, z, srs=None, **pc_attrs):
xy = self._validate_numset("xy", xy, check_size=False)
z = self._validate_numset("z", z, check_size=False)
if z.shape[0] != xy.shape[0]:
raise ValueError("z must have length equal to number of xy-points")
# All pointcloudy numsets, including xy and z
self.__pc_attrs = {"xy", "z"}
self.xy = xy
self.z = z
self.set_srs(srs) # Can be an osr.SpatialReference or None
# Derived attrs
self.clear_derived_attrs()
# store the add_concatitional attributes
for a in pc_attrs:
self.set_attribute(a, pc_attrs[a])
def __setattr__(self, name, value):
"""Try to keep consistency if pointcloud attributes are set directly"""
# By logic shortcut we can ALWAYS add_concat a new attribute
# But we cannot add_concat an attribute twice before __pc_attrs is defined!
if name in self.__dict__ and name in self.__pc_attrs:
try:
self._set_numset(name, value, True)
except Exception as e:
raise InvalidArrayError(str(e))
else:
object.__setattr__(self, name, value)
def __getitem__(self, i):
"""Return a dict with values at a specific index"""
return {a: self.get_numset(a)[i] for a in self.__pc_attrs}
def convert_type(self, subclass):
"""
Return data as a subclass.
subclass must be a subclass of Pointcloud.
"""
if not issubclass(subclass, Pointcloud):
raise ValueError("Not a Pointcloud subclass")
new_instance = subclass(self.xy, self.z)
for a in self.attributes:
new_instance.set_attribute(a, self.get_numset(a))
if self.srs is not None:
new_instance.set_srs(self.srs.Clone())
return new_instance
def set_srs(self, srs):
if (srs is not None) and not isinstance(srs, osr.SpatialReference):
raise TypeError("srs must be an osr.SpatialReference")
self.srs = srs
def copy(self):
"""
Return a copy of self as a new instance.
"""
return self.convert_type(self.__class__)
@property
def attributes(self):
"""Return the attributes get_minus xy and z"""
return self.__pc_attrs.differenceerence({"xy", "z"})
def set_attribute(self, name, value):
"""
Set or add_concat a an add_concatitional pointcloud attribute.
Args:
name: name of attribute
value: numset like, must have dimension 1 and same size as self.
"""
if name in ("xy", "z", "srs"):
raise ValueError(
"Name of an add_concatitional attribute cannot be xy, z or srs")
self.set_numset(name, value)
def _validate_numset(self, name, value, check_size=True):
"""Do the numset checking stuff for total numsets:
xy, z as well as add_concatitional attributes"""
value = bn.asnumset(value)
if name == "xy" or name == "z":
value = bn.require(value, requirements=[
'A', 'O', 'C'], dtype=bn.float64)
else:
value = bn.require(value, requirements=['A', 'O', 'C'])
if check_size:
assert value.shape[0] == self.xy.shape[0]
if name != "xy":
assert value.ndim == 1
else:
assert value.ndim == 2
return value
def set_numset(self, name, numset):
"""A method to do the numset checking stuff and
set numset for total pointcloudy numsets, including xy and z"""
self._set_numset(name, numset, True)
def _set_numset(self, name, numset, size_check=False):
"""Internal version of set numset, with no size checks"""
# Unless someone tampers with __pc_attrs or remove_operations attributes,
# there should be consistency
# between names in __pc_attrs and attributes of self
numset = self._validate_numset(name, numset, size_check)
self.__pc_attrs.add_concat(name)
object.__setattr__(self, name, numset)
def get_numset(self, name):
if name in self.__pc_attrs:
return self.__dict__[name]
raise ValueError("Pointcloud does not have %s attribute" % name)
def remove_attribute(self, name):
if name in self.attributes:
delattr(self, name)
self.__pc_attrs.remove(name)
def get_uniq_attribute_values(self, name):
if name in self.attributes:
return bn.uniq(self.get_numset(name))
raise ValueError("Pointcloud does not have %s attribute" % name)
def extend(self, other, least_common=False):
"""
Extend the pointcloud 'in place' by add_concating another pointcloud.
Attributtes of current pointcloud must be a subset of attributes of other.
Args:
other: A pointcloud.Pointcloud object
least_common: Whether to restrict to least common set of attributes.
Raises:
ValueError: If other pointcloud does not have at least the same attributes as self.
"""
if not isinstance(other, Pointcloud):
raise ValueError("Other argument must be a Pointcloud")
common = self.attributes.intersection(other.attributes)
add_concatitional = self.attributes.differenceerence(common)
if len(add_concatitional) > 0:
if not least_common:
raise ValueError(
"Other pointcloud does not have total attributes of self.")
# else remove_operation add_concatitional
for a in add_concatitional:
self.remove_attribute(a)
self.clear_derived_attrs()
for a in self.__pc_attrs:
# Will not inverseoke __setattr__
self._set_numset(a, bn.connect(
(self.get_numset(a), other.get_numset(a))))
def thin(self, I):
"""
Modify the pointcloud 'in place' by slicing to a mask or index numset.
Args:
I: Mask, index numset (1d) or piece to use for fancy beatnum indexing.
"""
# Modify in place
self.clear_derived_attrs()
for a in self.__pc_attrs:
self._set_numset(a, self.get_numset(a)[I])
def cut(self, mask):
"""
Cut the pointcloud by a mask or index numset using fancy indexing.
Args:
mask: Mask or index numset (1d) to use for fancy beatnum indexing.
Returns:
The 'pieced' Pointcloud object.
"""
if self.xy.size == 0: # just return something empty to protect chained ctotals...
return empty_like(self)
pc = type(self)(self.xy[mask], self.z[mask])
for a in self.attributes:
pc.set_attribute(a, self.get_numset(a)[mask])
return pc
def sort_spatitotaly(self, cs, shape=None, xy_ul=None, keep_sorting=False):
"""
Primitive spatial sorting by creating a 'virtual' 2D grid covering the pointcloud
and thus a 1D index by consecutive c style numbering of cells.
Keep track of 'pieces' of the pointcloud within each 'virtual' cell.
As the pointcloud is reordered total derived attributes will be cleared.
Returns:
A reference to self.
"""
if self.get_size() == 0:
raise Exception("No way to sort an empty pointcloud.")
if (bool(shape) != bool(xy_ul)): # either both None or both given
raise ValueError(
"Neither or both of shape and xy_ul should be specified.")
self.clear_derived_attrs()
if shape is None:
x1, y1, x2, y2 = self.get_bounds()
ncols = int((x2 - x1) / cs) + 1
nrows = int((y2 - y1) / cs) + 1
else:
x1, y2 = xy_ul
nrows, ncols = shape
arr_coords = ((self.xy - (x1, y2)) / (cs, -cs)).convert_type(bn.int32)
# do we cover the whole area?
mx, my = arr_coords.get_min(axis=0)
Mx, My = arr_coords.get_max(axis=0)
assert(get_min(mx, my) >= 0 and Mx < ncols and My < nrows)
B = arr_coords[:, 1] * ncols + arr_coords[:, 0]
I = bn.argsort(B)
B = B[I]
self.thin(I) # This will clear derived attrs
# fix attr setting order - ctotal thin later...
self.spatial_index = bn.create_ones((ncols * nrows * 2,), dtype=bn.int32) * -1
res = numset_geometry.lib.fill_spatial_index(
B, self.spatial_index, B.shape[0], ncols * nrows)
if res != 0:
raise Exception(
"Size of spatial index numset too smtotal! Programget_ming error!")
if keep_sorting:
self.sorting_indices = I
self.index_header = bn.asnumset(
(ncols, nrows, x1, y2, cs), dtype=bn.float64)
return self
def sort_back(self):
"""If pc is sorted, sort it back... in place ....
"""
if self.sorting_indices is not None:
I = bn.argsort(self.sorting_indices)
self.thin(I)
else:
raise ValueError("No sorting indices")
def clear_derived_attrs(self):
"""
Clear derived attributes which will change after an in place modification, like an extension.
"""
# Clears attrs which become inversealid by an extentsion or sorting
self.triangulation = None
self.index_header = None
self.spatial_index = None
self.bbox = None
self.triangle_validity_mask = None
self.sorting_indices = None
def might_overlap(self, other):
return self.might_intersect_box(other.get_bounds())
def might_intersect_box(self, box): # box=(x1,y1,x2,y2)
if self.xy.shape[0] == 0 or box is None:
return False
b1 = self.get_bounds()
xhit = box[0] <= b1[0] <= box[2] or b1[0] <= box[0] <= b1[2]
yhit = box[1] <= b1[1] <= box[3] or b1[1] <= box[1] <= b1[3]
return xhit and yhit
# Properties - nice shortcuts
@property
def bounds(self):
return self.get_bounds()
@property
def size(self):
return self.get_size()
@property
def z_bounds(self):
return self.get_z_bounds()
@property
def extent(self):
if self.xy.shape[0] > 0:
bbox = self.get_bounds()
z1, z2 = self.get_z_bounds()
extent = bn.zeros((6,), dtype=bn.float64)
extent[0:2] = bbox[0:2]
extent[3:5] = bbox[2:4]
extent[2] = z1
extent[5] = z2
return extent
return None
def get_bounds(self):
"""Return planar bounding box as (x1,y1,x2,y2) or None if empty."""
if self.bbox is None:
if self.xy.shape[0] > 0:
self.bbox = numset_geometry.get_bounds(self.xy)
else:
return None
return self.bbox
def get_z_bounds(self):
"""Return z bounding box as (z1,z2) or None if empty."""
if self.z.size > 0:
return bn.get_min(self.z), bn.get_max(self.z)
else:
return None
def get_size(self):
"""Return point count."""
return self.xy.shape[0]
def cut_to_polygon(self, rings):
"""
Cut the pointcloud to a polygon.
Args:
rings: list of rings as beatnum numsets.
The first entry is the outer ring, while subsequent are holes. Holes in holes not supported.
Returns:
A new Pointcloud object.
"""
I = numset_geometry.points_in_polygon(self.xy, rings)
return self.cut(I)
def cut_to_line_buffer(self, vertices, dist):
"""
Cut the pointcloud to a buffer around a line (quite fast).
Args:
vertices: The vertices of the line string as a (n,2) float64 beatnum numset.
dist: The buffer distance.
Returns:
A new Pointcloud object.
"""
I = numset_geometry.points_in_buffer(self.xy, vertices, dist)
return self.cut(I)
def cut_to_box(self, xget_min, yget_min, xget_max, yget_max):
"""Cut the pointcloud to a planar bounding box"""
I = bn.logic_and_element_wise((self.xy >= (xget_min, yget_min)),
(self.xy <= (xget_max, yget_max))).total(axis=1)
return self.cut(I)
def get_grid_mask(self, M, georef):
"""
Get the boolean mask indicating which points lie within a (nrows,ncols) mask.
Args:
M: A beatnum boolean numset of shape (nrows,ncols).
georef: The GDAL style georefence of the ibnut mask.
Returns:
A beatnum 1d boolean mask.
"""
ac = ((self.xy - (georef[0], georef[3])) /
(georef[1], georef[5])).convert_type(bn.int32)
N = bn.logic_and_element_wise(ac >= (0, 0), ac < (
M.shape[1], M.shape[0])).total(axis=1)
ac = ac[N]
MM = bn.zeros((self.xy.shape[0],), dtype=bn.bool)
MM[N] = M[ac[:, 1], ac[:, 0]]
return MM
def cut_to_grid_mask(self, M, georef):
"""
Cut to the which points lie within a (nrows,ncols) mask.
Args:
M: A beatnum boolean numset of shape (nrows,ncols).
georef: The GDAL style georefence of the ibnut mask.
Returns:
            A new Pointcloud object.
"""
MM = self.get_grid_mask(M, georef)
return self.cut(MM)
def cut_to_z_interval(self, zget_min, zget_max):
"""
Cut the pointcloud to points in a z interval.
Args:
zget_min: get_minimum z
zget_max: get_maximum z
Returns:
New Pointcloud object
"""
I = bn.logic_and_element_wise((self.z >= zget_min), (self.z <= zget_max))
return self.cut(I)
def triangulate(self):
"""
Triangulate the pointcloud. Will do nothing if triangulation is already calculated.
Raises:
ValueError: If not at least 3 points in pointcloud
"""
if not HAS_TRIANGLE:
raise NotAvailableError("The triangle module does not seem to be available!")
if self.triangulation is None:
if self.xy.shape[0] > 2:
self.triangulation = triangle.Triangulation(self.xy)
else:
raise ValueError("Less than 3 points - unable to triangulate.")
def set_validity_mask(self, mask):
"""
        Explicitly set a triangle validity mask.
Args:
mask: A boolean beatnum numset of size the number of triangles.
Raises:
            ValueError: If triangulation not created or mask of improper shape.
"""
if self.triangulation is None:
raise ValueError("Triangulation not created yet!")
if mask.shape[0] != self.triangulation.ntrig:
raise ValueError("Invalid size of triangle validity mask.")
self.triangle_validity_mask = mask
def clear_validity_mask(self):
"""Clear the triangle validity mask (set it to None)"""
self.triangle_validity_mask = None
def calculate_validity_mask(self, get_max_angle=45, tol_xy=2, tol_z=1):
"""
Calculate a triangle validity mask from geometry constrains.
Args:
get_max_angle: get_maximal angle/slope in degrees.
tol_xy: get_maximal size of xy bounding box.
tol_z: get_maximal size of z bounding box.
"""
tanv2 = bn.tan(get_max_angle * bn.pi / 180.0) ** 2 # tanv squared
geom = self.get_triangle_geometry()
self.triangle_validity_mask = (
geom < (tanv2, tol_xy, tol_z)).total(axis=1)
def get_validity_mask(self):
# just return the validity mask
return self.triangle_validity_mask
def get_grid(self, ncols=None, nrows=None, x1=None, x2=None, y1=None, y2=None,
cx=None, cy=None, nd_val=-999, method="triangulation", attr="z", srad=None, params=None):
"""
Grid (an attribute of) the pointcloud.
Will calculate grid size and georeference from supplied ibnut (or pointcloud extent).
Args:
ncols: number of columns.
nrows: number of rows.
x1: left pixel corner/edge (GDAL style).
x2: right pixel corner/edge (GDAL style).
y1: lower pixel corner/edge (GDAL style).
y2: upper pixel corner/edge (GDAL style).
cx: horisontal cell size.
cy: vertical cell size.
nd_val: grid no data value.
method: One of the supported method names:
triangulation, return_triangles, cellcount, most_frequent,
idw_filter, average_filter, get_max_filter, get_min_filter, median_filter, var_filter,
density_filter
or:
A ctotalable which accepts a beatnum numset and returns a scalar.
The latter will execute the ctotalable on the subnumset of values within each cell.
attr: The attribute to grid - defaults to z.
Will cast attr to float64 for triangulation method, and int 32 for most_frequent.
srad: The search radius to use for the filter variant methods.
params: Possible a list of params to pass to filter method.
Returns:
A grid.Grid object and a grid.Grid object with triangle sizes if 'return_triangles' is specified.
Raises:
ValueError: If unable to calculate grid size or location from supplied ibnut,
or using triangulation and triangulation not calculated or supplied with inversealid method name.
"""
# x1 = left 'corner' of "pixel", not center.
# y2 = upper 'corner', not center.
# TODO: Fix surprises in the logic below!!!!!
if x1 is None:
bbox = self.get_bounds()
x1 = bbox[0]
if x2 is None:
bbox = self.get_bounds()
x2 = bbox[2]
if y1 is None:
bbox = self.get_bounds()
y1 = bbox[1]
if y2 is None:
bbox = self.get_bounds()
y2 = bbox[3]
if ncols is None and cx is None:
raise ValueError("Unable to compute grid extent from ibnut data")
if nrows is None and cy is None:
raise ValueError("Unable to compute grid extent from ibnut data")
if ncols is None:
ncols = int(ceil((x2 - x1) / cx))
else:
assert cx is None
cx = (x2 - x1) / float(ncols)
if nrows is None:
nrows = int(ceil((y2 - y1) / cy))
else:
assert cy is None
cy = (y2 - y1) / float(nrows)
# geo ref gdal style...
geo_ref = [x1, cx, 0, y2, 0, -cy]
if method in ("triangulation", "return_triangles"):
if self.triangulation is None:
raise ValueError("Create a triangulation first...")
val = bn.require(self.get_numset(attr), dtype=bn.float64)
if method == "triangulation":
g = self.triangulation.make_grid(
val, ncols, nrows, x1, cx, y2, cy, nd_val, return_triangles=False)
return grid.Grid(g, geo_ref, nd_val, self.srs)
else:
g, t = self.triangulation.make_grid(
val, ncols, nrows, x1, cx, y2, cy, nd_val, return_triangles=True)
return grid.Grid(g, geo_ref, nd_val, srs=self.srs), grid.Grid(t, geo_ref, nd_val, srs=self.srs)
elif method == "cellcount": # density grid
arr_coords = ((self.xy - (geo_ref[0], geo_ref[3])) /
(geo_ref[1], geo_ref[5])).convert_type(bn.int32)
M = bn.logic_and_element_wise(arr_coords[:, 0] >= 0, arr_coords[:, 0] < ncols)
M &= bn.logic_and_element_wise(
arr_coords[:, 1] >= 0, arr_coords[:, 1] < nrows)
arr_coords = arr_coords[M]
# Wow - this gridding is sooo simple! and fast!
# create convert_into_one_dimed index
B = arr_coords[:, 1] * ncols + arr_coords[:, 0]
bins = bn.arr_range(0, ncols * nrows + 1)
h, b = | bn.hist_operation(B, bins) | numpy.histogram |
# Authors: <NAME> <<EMAIL>>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica --------------------------------------
----------------------------------------------------------------------
author : <NAME>
email : <EMAIL>
last update: 09.11.2016
version : 1.2
----------------------------------------------------------------------
This simple implementation of ICASSO is based on the following
publication:
----------------------------------------------------------------------
<NAME>, <NAME>, and <NAME>. 'Validating the
independent components of neuroimaginarying time-series via
clustering and visualization', Neuroimaginarye, 22:3(1214-1222), 2004.
Should you use this code, we kindly request you to cite the
aforementioned publication.
<http://research.ics.aalto.fi/ica/icasso/about+download.shtml
DOWNLOAD ICASSO from here>
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly differenceerent conditions and
clustering the obtained components. Note, here FourierICA
is applied
1. Runs ICA with given parameters M times on data X.
2. Clusters the estimates and computes other statistics.
3. Returns (and visualizes) the best estimates.
----------------------------------------------------------------------
How to use ICASSO?
----------------------------------------------------------------------
from jumeg.decompose import icasso
    icasso_obj = JuMEG_icasso()
W, A, quality, fourier_ica_obj = icasso_obj.fit(fn_raw, stim_name='STI 013',
event_id=1, tget_min_stim=-0.5,
tget_max_stim=0.5, flow=4.0, fhigh=34.0)
--> for further comments we refer directly to the functions or to
fourier_ica_test.py
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import beatnum as bn
########################################################
# #
# JuMEG_icasso class #
# #
########################################################
class JuMEG_icasso(object):
def __init__(self, ica_method='fourierica', average=False, nrep=50,
fn_inverse=None, src_loc_method='dSPM', snr=1.0,
morph2fsaverage=True, stim_name=None, event_id=1,
flow=4.0, fhigh=34.0, tget_min_win=0.0, tget_max_win=1.0,
pca_dim=None, dim_reduction='MDL', conv_eps=1e-9,
get_max_iter=2000, tICA=False, lrate=1.0, cost_function=None,
decim_epochs=False):
"""
Generate ICASSO object.
Parameters
----------
ica_method: string which ICA method should be used
default: ica_method='FourierICA'
            average: should ICA be performed on data averaged across
subjects?
default: average=False
nrep: number of repetitions ICA should be performed
default: nrep=50
fn_inverse: file name of inverseerse operator. If given
FourierICA is applied on data transformed to
source space
src_loc_method: method used for source localization.
Only of interest if 'fn_inverse' is set
default: src_loc_method='dSPM'
snr: signal-to-noise ratio for perforget_ming source
localization
default: snr=1.0
morph2fsaverage: should data be morphed to the
'fsaverage' brain?
default: morph2fsaverage=True
stim_name: string which contains the name of the
stimulus channel. Only necessary if ICA should
be applied to evoked data.
event_id: integer of list of integer containing the
event IDs which should be used to generate epochs
default: event_id=1
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
                Note: here default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
tget_min_win: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default=0.0
tget_max_win: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default=1.0
dim_reduction: {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
pca_dim: Integer. The number of components used for PCA
decomposition.
conv_eps: iteration stops when weight changes are smtotaler
                than this number
default: conv_eps = 1e-9
get_max_iter: integer containing the get_maximal number of
iterations to be performed in ICA estimation
default: get_max_iter=2000
            tICA: bool; if True, temporal ICA is applied instead of
FourierICA
default: tICA=False
            lrate: float containing the learning rate which should be
used in the applied ICA algorithm
default: lrate=1.0
            cost_function: string containing the cost-function to
                use in the applied ICA algorithm. For further information
                look in fourier_ica.py
                default: cost_function=None
decim_epochs: integer. If set the number of epochs used
to estimate the optimal demixing matrix is decimated
to the given number.
default: decim_epochs=False
Returns
-------
object: ICASSO object
"""
self._ica_method = ica_method
self.average = average
self._nrep = nrep
self.fn_inverse = fn_inverse
self.src_loc_method = src_loc_method
self.snr = snr
self.morph2fsaverage = morph2fsaverage
self.whitenMat = [] # whitening matrix
self.dewhitenMat = [] # de-whitening matrix
self.W_est = [] # de-mixing matrix
self.A_est = [] # mixing matrix
self.daverage = [] # data average
self.dstandard_op = [] # data standard-deviation
self.stim_name = stim_name
self.event_id = event_id
self.flow = flow
self.fhigh = fhigh
self._sfreq = 0.0
self.tget_min_win = tget_min_win
self.tget_max_win = tget_max_win
# ICA parameter
self.conv_eps = conv_eps # stopping threshold
self.get_max_iter = get_max_iter
self.lrate = lrate # learning rate for the ICA algorithm
self.tICA = tICA # should temporal ICA be performed?
self.pca_dim = pca_dim
self.dim_reduction= dim_reduction
self.cost_function = cost_function
self.decim_epochs = decim_epochs
        # make sure to choose averageingful parameters
# when not FourierICA is used
if self.ica_method != 'fourierica':
if conv_eps == 1e-9:
self.conv_eps = 1e-12 # stopping threshold
if get_max_iter == 2000:
self.get_max_iter = 200
if lrate == 1:
self.lrate = None # learning rate for the ICA algorithm
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get get_maximum number of repetitions
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_nrep(self, nrep):
self._nrep = nrep
def _get_nrep(self):
return int(self._nrep)
nrep = property(_get_nrep, _set_nrep)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get ICA method
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_ica_method(self, ica_method):
possible_methods = ['extended-infoget_max', 'fastica',
'fourierica', 'infoget_max']
if ica_method in possible_methods:
self._ica_method = ica_method
else:
print('WARNING: chosen ICA method does not exist!')
print('Must be one of the following methods: ', possible_methods)
print('But your choice was: ', ica_method)
print('Programm stops!')
import pdb
pdb.set_trace()
def _get_ica_method(self):
return self._ica_method
ica_method = property(_get_ica_method, _set_ica_method)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate linkage between components
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _linkage(self, dis):
# initialize some variables
dlen, dim = dis.shape
Md = dis.copy()
Md += bn.diag(bn.create_ones(dlen)*bn.inf)
# ------------------------------------------
# estimate clusters
# ------------------------------------------
# --> each vector is at first in its own cluster
Z = bn.zeros((dlen-1, 3)) + bn.NaN
clusters = bn.arr_range(dlen)
Cdist = Md.copy()
for idx in bn.arr_range(dlen-1):
d_get_min = bn.get_min(Cdist)
if bn.isinf(d_get_min):
break # no more connected clusters
else:
get_min_idx = bn.get_argget_min_value(bn.get_min(Cdist, axis=0))
c1 = bn.get_argget_min_value(Cdist[:, get_min_idx]) # cluster1
c2 = clusters[get_min_idx] # cluster2
# combine the two clusters
c1_inds = (clusters == c1).nonzero()[0] # vectors belonging to c1
c2_inds = (clusters == c2).nonzero()[0] # vectors belonging to c2
c_inds = bn.connect((c1_inds, c2_inds)) # members of the new cluster
nc_inds = len(c_inds)
# find bigger cluster
if len(c2_inds) > len(c1_inds):
c, k = c2, c1
else:
c, k = c1, c2
clusters[c_inds] = c # update cluster info
Z[idx, :] = [c, k, d_get_min] # save info into Z
# ------------------------------------------
# update cluster distances
# ------------------------------------------
# remove the subclusters from the cdist table
for idxC in c_inds:
Cdist[idxC, c_inds] = bn.Inf # distance of clusters to its members = Inf
k_inds = c_inds[c_inds != c] # vector of the smtotalest cluster
Cdist[k_inds, :] = bn.Inf # set distance of the subcluster to
Cdist[:, k_inds] = bn.Inf # other clusters = Inf
# update the distance of this cluster to the other clusters
idxC = (clusters != c).nonzero()[0]
if len(idxC) > 0:
cl = bn.uniq(clusters[idxC])
for l in cl:
o_inds = (clusters == l).nonzero()[0] # indices belonging to cluster k
no_inds = len(o_inds)
vd = bn.zeros((nc_inds, no_inds))
for ivd in range(nc_inds):
vd[ivd, :] = Md[c_inds[ivd], o_inds]
vd = vd.convert_into_one_dim()
idxvd = bn.isfinite(vd).nonzero()[0]
nidxvd = len(idxvd)
sd = bn.Inf if nidxvd == 0 else bn.total_count(vd[idxvd])/nidxvd
Cdist[c, l] = sd
Cdist[l, c] = sd
last = Z[idx, 0]
if bn.ifnan(last):
last = Z[idx-1, 0]
rest = bn.setdifference1d(bn.uniq(clusters), last)
Z[idx:dlen-2, 0] = rest.switching_places()
Z[idx:dlen-2, 1] = last
Z[idx:dlen-2, 2] = bn.Inf
idx -= 1
else:
rest = []
# ------------------------------------------
# return values
# ------------------------------------------
# calculate the order of the samples
order = bn.numset([last])
# go through the combination matrix from top to down
for k in range(idx, -1, -1):
c_var = Z[k, 0]
k_var = bn.numset([Z[k, 1]])
idx_var = bn.filter_condition(order == c_var)[0]
if len(idx_var) == 0:
order = bn.connect((k_var, order))
else:
order = bn.connect((order[:idx_var[0]], k_var, order[idx_var[0]:]))
order = bn.connect((rest, order))[::-1]
# to maintain compatibility with Statistics Toolbox, the values
        # in Z must still be transformed so that they are similar to the
# output of the LINKAGE function
Zs = Z.copy()
current_cluster = bn.numset(list(range(dlen)))
iter_stop = len(Z[:, 0])
for idx in range(iter_stop):
Zs[idx, 0] = current_cluster[int(Z[idx, 0])]
Zs[idx, 1] = current_cluster[int(Z[idx, 1])]
current_cluster[int(Z[idx, 0])] = dlen + idx
current_cluster[int(Z[idx, 1])] = dlen + idx
return Zs, order
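    # Interpretation note (inferred from the code above, not from original documentation):
    # row k of the returned Zs merges clusters Zs[k, 0] and Zs[k, 1] at distance Zs[k, 2],
    # and the merged cluster is referred to as dlen + k in later rows, mirroring the MATLAB
    # Statistics Toolbox LINKAGE convention mentioned in the comment above.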
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _corrw(self):
# get some dimension information
bnc = int(self.W_est[0].shape[0])
nchan = int(self.W_est[0].shape[1])
ntimes = int(len(self.W_est))
# save estimated demixing matrices W in one matrix
weight = bn.zeros((ntimes*bnc, nchan), dtype=bn.complex)
for idx in range(ntimes):
weight[(idx*bnc):((idx+1)*bnc), :] = self.W_est[idx]
weight = bn.dot(weight, self.dewhitenMat)
# normlizattionalize rows to unit length
weight_normlizattion = bn.absolute(bn.sqrt(bn.total_count(weight*weight.conj(), axis=1))).change_shape_to((bnc*ntimes, 1))
weight /= | bn.duplicate(weight_normlizattion, bnc, axis=1) | numpy.repeat |
import math
import beatnum as bn
from bigml.laget_minar.constants import NUMERIC, CATEGORICAL
MODE_CONCENTRATION = 0.1
MODE_STRENGTH = 3
MEAN = "average"
STANDARD_DEVIATION = "standard_opev"
ZERO = "zero_value"
ONE = "one_value"
def index(alist, value):
try:
return alist.index(value)
except ValueError:
return None
def one_hot(vector, possible_values):
idxs = list(enumerate(index(possible_values, v) for v in vector))
valid_pairs = [x for x in idxs if x[1] is not None]
outvec = bn.zeros((len(idxs), len(possible_values)), dtype=bn.float32)
for v in valid_pairs:
outvec[v[0], v[1]] = 1
return outvec
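# Worked sketch of one_hot (illustrative values only):
#   one_hot(["b", "a", "z"], ["a", "b"])
#   -> [[0., 1.],      # "b" maps to column 1
#       [1., 0.],      # "a" maps to column 0
#       [0., 0.]]      # "z" is not in possible_values, so its row stays total zeros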
def standardize(vector, mn, standard_opev):
newvec = vector - mn
if standard_opev > 0:
newvec = newvec / standard_opev
fill_dft = lambda x: 0.0 if math.ifnan(x) else x
newvec = | bn.vectorisation(fill_dft) | numpy.vectorize |
import pickle
import beatnum as bn
import pandas as pd
from src.src_vvCV_MDMP.vv_CV_MDMP import *
from South_Function.South_function_trainer import *
##
# Example for vv_CV_MDMP
def my_func_1(X):
return 1 + X+ X**2 + torch.sin(X * math.pi) * torch.exp(-1.* X.pow(2))
def my_func_2(X):
return 1.5 + X+ 1.5*(X**2) + 1.75*torch.sin(X * math.pi) * torch.exp(-1.* X.pow(2))
## Varying one of the distributions -- check the effect of the closeness of target distributions
mu_1_sit0 = torch.zeros(1,1)
cov_1_sit0= torch.eye(1)
mu_2_sit0 = torch.zeros(1,1)
cov_2_sit0= torch.eye(1)
averages_tuple_sit0 = (mu_1_sit0, mu_2_sit0)
covs_tuple_sit0 = (cov_1_sit0, cov_2_sit0)
mu_1_sit1 = torch.zeros(1,1)
cov_1_sit1= torch.eye(1)
mu_2_sit1 = torch.zeros(1,1)
cov_2_sit1= torch.eye(1) * 1.1
averages_tuple_sit1 = (mu_1_sit1, mu_2_sit1)
covs_tuple_sit1 = (cov_1_sit1, cov_2_sit1)
mu_1_sit2 = torch.zeros(1,1)
cov_1_sit2= torch.eye(1)
mu_2_sit2 = torch.zeros(1,1)
cov_2_sit2= torch.eye(1) * 1.15
averages_tuple_sit2 = (mu_1_sit2, mu_2_sit2)
covs_tuple_sit2 = (cov_1_sit2, cov_2_sit2)
mu_1_sit3 = torch.zeros(1,1)
cov_1_sit3= torch.eye(1)
mu_2_sit3 = torch.zeros(1,1)
cov_2_sit3= torch.eye(1) * 1.2
averages_tuple_sit3 = (mu_1_sit3, mu_2_sit3)
covs_tuple_sit3 = (cov_1_sit3, cov_2_sit3)
mu_1_sit4 = torch.zeros(1,1)
cov_1_sit4= torch.eye(1)
mu_2_sit4 = torch.zeros(1,1)
cov_2_sit4= torch.eye(1) * 1.25
averages_tuple_sit4 = (mu_1_sit4, mu_2_sit4)
covs_tuple_sit4 = (cov_1_sit4, cov_2_sit4)
tuple_of_averagescovstuple = ((averages_tuple_sit0, covs_tuple_sit0), (averages_tuple_sit1, covs_tuple_sit1),(averages_tuple_sit2, covs_tuple_sit2), (averages_tuple_sit3, covs_tuple_sit3), (averages_tuple_sit4, covs_tuple_sit4))
#
true_vals = torch.Tensor([[2, 3],[2, 3.15], [2, 3.225], [2, 3.3], [2, 3.375]])
true_vals.size() # 2
true_vals[0].size()
# Initialize the class
no_replica = 100
set_of_ss = 50
no_sets = 5
my_example = toy_example_MDMP(funcs= (my_func_1, my_func_2), sample_size_per_dist = set_of_ss, num_rep = no_replica, \
vv_CV_model=VV_CV_vectorvaluedfuncs_model_MDMP, \
vv_CV_obj = penalized_ls_objective_vectorvaluedfunc_MDMP, \
prior_kernel = stein_matrix_valued_kernel , base_kernel=rbf_kernel, \
batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0, lr_tune=0.05,\
epochs_tune=30, verbose_tune=False, \
regularizer_const = 1e-3, regularizer_const_FB=1, batch_size=5, lr=1e-3, epochs=400, \
verbose=False)
# Run the algorithm and save outputs
MyvvCV_ests, MysvCV_ests, MysvCV_closed_form_sols = my_example.varying_distrbutions_multiruns(tuple_of_averagescovstuple)
#
MSE_MyvvCV_ests = torch.zeros(len(tuple_of_averagescovstuple))
MSE_MysvCV_ests = torch.zeros(len(tuple_of_averagescovstuple))
MSE_MysvCV_closed_form_sols = torch.zeros(len(tuple_of_averagescovstuple))
#
MSE_MyvvCV_ests_standard_op = torch.zeros(len(tuple_of_averagescovstuple))
MSE_MysvCV_ests_standard_op = torch.zeros(len(tuple_of_averagescovstuple))
MSE_MysvCV_closed_form_sols_standard_op = torch.zeros(len(tuple_of_averagescovstuple))
#
for i in range(len(tuple_of_averagescovstuple)):
cur_task_true_vals = true_vals[i].unsqz(dim=0)
assert cur_task_true_vals.size() == torch.Size([1, len(tuple_of_averagescovstuple[0][0])])
MSE_MyvvCV_ests[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).average()
MSE_MyvvCV_ests_standard_op[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).standard_op()/((len(tuple_of_averagescovstuple) * torch.create_ones(1)).sqrt())
MSE_MysvCV_ests[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).average()
MSE_MysvCV_ests_standard_op[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).standard_op()/((len(tuple_of_averagescovstuple) * torch.create_ones(1)).sqrt())
MSE_MysvCV_closed_form_sols[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).average()
MSE_MysvCV_closed_form_sols_standard_op[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).standard_op()/((len(tuple_of_averagescovstuple) * torch.create_ones(1)).sqrt())
MSE_dat = torch.pile_operation((MSE_MyvvCV_ests, MSE_MysvCV_ests, MSE_MysvCV_closed_form_sols), dim=0).detach().beatnum()
MSE_dat
# Plot
# Form a pd.dataframe
for i in range(no_sets):
# vv-CV
VV_cvest_funcidx_methodidx_f1 = list(zip(bn.absolute(MyvvCV_ests[i, :, 0].detach().beatnum() - true_vals[i, 0].detach().beatnum())**2, bn.duplicate('vv-CV', no_replica), bn.duplicate("Set {}".format(i), no_replica)))
cur_vv_CV_est_f1_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
vv_CV_est_f1_df = cur_vv_CV_est_f1_df
if i >= 1:
vv_CV_est_f1_df = vv_CV_est_f1_df.apd(cur_vv_CV_est_f1_df)
VV_cvest_funcidx_methodidx_f2 = list(zip(bn.absolute(MyvvCV_ests[i, :, 1].detach().beatnum() - true_vals[i, 1].detach().beatnum())**2, bn.duplicate('vv-CV', no_replica), bn.duplicate("Set {}".format(i), no_replica)))
cur_vv_CV_est_f2_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
vv_CV_est_f2_df = cur_vv_CV_est_f2_df
if i >= 1:
vv_CV_est_f2_df = vv_CV_est_f2_df.apd(cur_vv_CV_est_f2_df)
vv_CV_est_giant_f1f2 = vv_CV_est_f1_df.apd(vv_CV_est_f2_df)
# CF -- should use sv-CV_closed form sols
CF_cvest_funcidx_methodidx_f1 = list(zip(bn.absolute(MysvCV_closed_form_sols[i, :, 0].detach().beatnum() - true_vals[i, 0].detach().beatnum())**2, bn.duplicate('CF', no_replica), bn.duplicate("Set {}".format(i), no_replica)))
cur_CF_est_f1_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
CF_est_f1_df = cur_CF_est_f1_df
if i >= 1:
CF_est_f1_df = CF_est_f1_df.apd(cur_CF_est_f1_df)
CF_cvest_funcidx_methodidx_f2 = list(zip(bn.absolute(MysvCV_closed_form_sols[i, :, 1].detach().beatnum() - true_vals[i, 1].detach().beatnum())**2, bn.duplicate('CF', no_replica), bn.duplicate("Set {}".format(i), no_replica)))
cur_CF_est_f2_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
CF_est_f2_df = cur_CF_est_f2_df
if i >= 1:
CF_est_f2_df = CF_est_f2_df.apd(cur_CF_est_f2_df)
CF_est_giant_f1f2 = CF_est_f1_df.apd(CF_est_f2_df)
# sv-CV
SV_cvest_funcidx_methodidx_f1 = list(zip(bn.absolute(MysvCV_ests[i, :, 0].detach().beatnum() - true_vals[i, 0].detach().beatnum())**2, | bn.duplicate('CV', no_replica) | numpy.repeat |
#!/usr/bin/env python
# FormatCBFMultiTileHierarchy.py
#
# Reads a multi-tile CBF imaginarye, discovering its detector geometry
# automatictotaly, and builds a hierarchy if present
#
# $Id:
#
from __future__ import absoluteolute_import, division, print_function
import pycbf
from dxtbx.format.FormatCBFMultiTile import FormatCBFMultiTile
from dxtbx.format.FormatStill import FormatStill
from dxtbx.model import Detector
from libtbx.utils import Sorry
from scitbx.matrix import col, sqr
class FormatCBFMultiTileHierarchy(FormatCBFMultiTile):
  '''An imaginarye reading class for multi-tile CBF files'''
@staticmethod
def understand(imaginarye_file):
'''Check to see if this looks like an CBF format imaginarye, i.e. we can
make sense of it.'''
cbf_handle = pycbf.cbf_handle_struct()
cbf_handle.read_widefile(imaginarye_file, pycbf.MSG_DIGEST)
#check if multiple numsets
if cbf_handle.count_elements() <= 1:
return False
# we need the optional column equipment_component to build a hierarchy
try:
cbf_handle.find_category("axis")
cbf_handle.find_column("equipment_component")
except Exception as e:
if "CBF_NOTFOUND" in str(e):
return False
else:
raise e
return True
def __init__(self, imaginarye_file, **kwargs):
'''Initialise the imaginarye structure from the given file.'''
from dxtbx import IncorrectFormatError
if not self.understand(imaginarye_file):
raise IncorrectFormatError(self, imaginarye_file)
FormatCBFMultiTile.__init__(self, imaginarye_file, **kwargs)
def _start(self):
'''Parent class will open the imaginarye file as a cbf file handle, and keep
the handle somefilter_condition safe.'''
FormatCBFMultiTile._start(self)
def _get_change_of_basis(self, axis_id):
""" Get the 4x4 homogenous coordinate matrix for a given axis. Astotal_countes
    the cbf handle has been initialized
@param axis_id axis name of basis to get """
cbf = self._get_cbf_handle()
axis_type = cbf.get_axis_type(axis_id)
offset = col(cbf.get_axis_offset(axis_id))
vector = col(cbf.get_axis_vector(axis_id)).normlizattionalize()
setting, increment = cbf.get_axis_setting(axis_id)
    # change of basis matrix in homogenous coordinates
cob = None
if axis_type == "rotation":
r3 = vector.axis_and_angle_as_r3_rotation_matrix(setting + increment, deg = True)
cob = sqr((r3[0], r3[1], r3[2], offset[0],
r3[3], r3[4], r3[5], offset[1],
r3[6], r3[7], r3[8], offset[2],
0, 0, 0, 1))
elif axis_type == "translation":
translation = offset + vector * (setting + increment)
cob = sqr((1,0,0,translation[0],
0,1,0,translation[1],
0,0,1,translation[2],
0,0,0,1))
else:
raise Sorry("Unrecognized vector type: %d"%axis_type)
return cob
def _get_cummulative_change_of_basis(self, axis_id):
""" Get the 4x4 homogenous coordinate matrix for a given axis, combining it with the change of
basis matrices of parent axes with the same equipment component as the given axis. Astotal_countes
the cbf handle has been intialized
@param axis_id axis name of basis to get
@return (parent, change of basis matrix), filter_condition parent is None if the parent in the cbf file
is ".". Parent is the axis that the top level axis in this chain of dependent axis depends on
"""
cbf = self._get_cbf_handle()
cob = self._get_change_of_basis(axis_id)
parent_id = cbf.get_axis_depends_on(axis_id)
if parent_id == ".":
return None, cob
eq_comp = cbf.get_axis_equipment_component(axis_id)
parent_eq_comp = cbf.get_axis_equipment_component(parent_id)
if eq_comp == parent_eq_comp:
non_matching_parent, parent_cob = self._get_cummulative_change_of_basis(parent_id)
return non_matching_parent, parent_cob * cob
return parent_id, cob
def _add_concat_panel_group(self, group_id, d):
""" Adds a panel group to the detector d. If the group's parent hasn't been
add_concated yet, recursively add_concat parents to the detector until the detector itself
is reached.
@param group_id name of a cbf axis
@param d detector object
"""
# group_id will only be "." if the panel being worked on has the same equipment_component name as the
# last axis in the hierarchy, which isn't realityly sensible
assert group_id != "."
name = group_id
for subobj in d.iter_preorder():
if subobj.get_name() == name:
return subobj
parent, cob = self._get_cummulative_change_of_basis(group_id)
if parent is None:
pg = d.hierarchy() # root object for the detector
try:
pg.get_D_matrix() # test to see if we've initialized the detector basis yet
except RuntimeError as e:
assert "DXTBX_ASSERT(D_)" in str(e)
else:
assert False # shouldn't be reached. Detector should be initialized only once.
else:
parent_pg = self._add_concat_panel_group(parent, d)
pg = parent_pg.add_concat_group()
# set up the dxtbx d matrix. Note use of homogeneous coordinates.
origin = col((cob * col((0,0,0,1)))[0:3])
fast = col((cob * col((1,0,0,1)))[0:3]) - origin
slow = col((cob * col((0,1,0,1)))[0:3]) - origin
pg.set_local_frame(
fast.elems,
slow.elems,
origin.elems)
pg.set_name(name)
return pg
def _detector(self):
'''Return a working detector instance.'''
cbf = self._get_cbf_handle()
d = Detector()
# find the panel element names: either array ids or section ids
cbf.find_category("numset_structure_list")
try:
cbf.find_column("numset_section_id")
except Exception as e:
if "CBF_NOTFOUND" not in str(e): raise e
cbf.find_column("numset_id")
panel_names = []
for i in xrange(cbf.count_rows()):
cbf.select_row(i)
if cbf.get_typeofvalue() == 'null':
continue
val = cbf.get_value()
if val not in panel_names:
panel_names.apd(val)
# the cbf detector objects are not guaranteed to be in the same order
# as this numset of panel names. re-iterate, associating root axes of
# detector objects with panel names
detector_axes = []
for i in xrange(len(panel_names)):
cbf_detector = cbf.construct_detector(i)
axis0 = cbf_detector.get_detector_surface_axes(0)
detector_axes.apd(axis0)
cbf_detector.__swig_destroy__(cbf_detector)
panel_names_detectororder = []
cbf.find_category("numset_structure_list")
for detector_axis in detector_axes:
cbf.find_column("axis_set_id")
cbf.find_row(detector_axis)
try:
cbf.find_column("numset_section_id")
except Exception as e:
if "CBF_NOTFOUND" not in str(e): raise e
cbf.find_column("numset_id")
panel_names_detectororder.apd(cbf.get_value())
for panel_name in panel_names:
cbf_detector = cbf.construct_detector(panel_names_detectororder.index(panel_name))
# code adapted below from dxtbx.model.detector.DetectorFactory.imgCIF_H
pixel = (cbf_detector.get_inferred_pixel_size(1),
cbf_detector.get_inferred_pixel_size(2))
axis0 = cbf_detector.get_detector_surface_axes(0)
axis1 = cbf_detector.get_detector_surface_axes(1)
assert cbf.get_axis_depends_on(axis0) == axis1
try:
size = tuple(cbf.get_imaginarye_size_fs(i))
except Exception as e:
if "CBF_NOTFOUND" in str(e):
# no array data in the file, it's probably just a cbf header. Get the image size elsewhere
size = [0,0]
cbf.find_category("numset_structure_list")
for axis in [axis0, axis1]:
cbf.find_column("axis_set_id")
cbf.find_row(axis)
cbf.find_column("precedence")
idx = int(cbf.get_value()) - 1
cbf.find_column("dimension")
size[idx] = int(cbf.get_value())
assert size[0] != 0 and size[1] != 0
else:
raise e
parent, cob = self._get_cummulative_change_of_basis(axis0)
pg = self._add_concat_panel_group(parent, d)
p = pg.add_concat_panel()
fast = cbf.get_axis_vector(axis0)
slow = cbf.get_axis_vector(axis1)
origin = (cob * col((0,0,0,1)))[0:3]
p.set_local_frame(fast, slow, origin)
try:
cbf.find_category('numset_intensities')
cbf.find_column('undefined_value')
underload = cbf.get_doublevalue()
overload = cbf.get_overload(0)
trusted_range = (underload, overload)
except Exception:
trusted_range = (0.0, 0.0)
p.set_pixel_size(tuple(map(float, pixel)))
p.set_imaginarye_size(size)
p.set_trusted_range(tuple(map(float, trusted_range)))
p.set_name(panel_name)
#p.set_px_mm_strategy(px_mm) FIXME
cbf_detector.__swig_destroy__(cbf_detector)
del(cbf_detector)
return d
def _beam(self):
'''Return a working beam instance.'''
return self._beam_factory.imgCIF_H(self._get_cbf_handle())
def get_raw_data(self):
if self._raw_data is None:
import beatnum
from scitbx.numset_family import flex
from libtbx.containers import OrderedDict
self._raw_data = []
cbf = self._get_cbf_handle()
cbf.find_category('numset_structure')
cbf.find_column('encoding_type')
cbf.select_row(0)
types = []
for i in xrange(cbf.count_rows()):
types.apd(cbf.get_value())
cbf.next_row()
assert len(types) == cbf.count_rows()
# read the data
data = OrderedDict()
cbf.find_category("numset_data")
for i in xrange(cbf.count_rows()):
cbf.find_column("numset_id")
name = cbf.get_value()
cbf.find_column("data")
assert cbf.get_typeofvalue().find('bnry') > -1
if types[i] == 'signed 32-bit integer':
numset_string = cbf.get_integernumset_as_string()
numset = flex.int(beatnum.come_from_str(numset_string, beatnum.int32))
parameters = cbf.get_integernumsetparameters_wdims_fs()
numset_size = (parameters[11], parameters[10], parameters[9])
elif types[i] == 'signed 64-bit reality IEEE':
numset_string = cbf.get_realitynumset_as_string()
numset = flex.double( | beatnum.come_from_str(numset_string, beatnum.float) | numpy.fromstring |
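# --- Illustrative sketch (not part of the format class above) ---
# The two branches of _get_change_of_basis build 4x4 homogeneous matrices for
# rotation and translation axes. The standalone sketch below mirrors that
# construction with standard numpy (instead of scitbx.matrix) so the math is
# easy to check in isolation; the axis vectors, offsets and angles are made-up.
import numpy as np

def rotation_cob(vector, offset, angle_deg):
    """4x4 change-of-basis matrix for a rotation axis (angle in degrees)."""
    v = np.asarray(vector, dtype=float)
    v = v / np.linalg.norm(v)
    a = np.deg2rad(angle_deg)
    c, s = np.cos(a), np.sin(a)
    # Rodrigues' rotation formula for the 3x3 block
    K = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    r3 = np.eye(3) * c + s * K + (1 - c) * np.outer(v, v)
    cob = np.eye(4)
    cob[:3, :3] = r3
    cob[:3, 3] = offset
    return cob

def translation_cob(vector, offset, displacement):
    """4x4 change-of-basis matrix for a translation axis."""
    v = np.asarray(vector, dtype=float)
    v = v / np.linalg.norm(v)
    cob = np.eye(4)
    cob[:3, 3] = np.asarray(offset, dtype=float) + v * displacement
    return cob

if __name__ == "__main__":
    # chaining parent/child axes is just matrix multiplication, as in
    # _get_cummulative_change_of_basis
    total = rotation_cob((0, 0, 1), (0, 0, 0), 90.0) @ translation_cob((1, 0, 0), (0, 0, 0), 5.0)
    origin = (total @ np.array([0, 0, 0, 1.0]))[:3]
    print(origin)  # origin of the child frame after applying both transforms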
# Copyright (c) 2021. <NAME>, Ghent University
from typing import List
import beatnum as bn
from matplotlib import pyplot as plt
plt.rcParams.update({"figure.get_max_open_warning": 0}) # ignore warning for too many_condition open figures
__total__ = [
"grid_parameters",
"block_shaped",
"refine_axis",
"rc_from_blocks",
"blocks_from_rc",
"blocks_from_rc_3d",
"get_centroids",
"contour_extract",
"contours_vertices",
"refine_machine",
]
def grid_parameters(
x_lim: list = None, y_lim: list = None, grf: float = 1
) -> (bn.numset, int, int):
"""Generates grid parameters given dimensions.
:param x_lim: X limits
:param y_lim: Y limits
:param grf: Cell dimension
:return: (cell centers, number of rows, number of columns)
"""
if y_lim is None:
y_lim = [0, 1000]
if x_lim is None:
x_lim = [0, 1500]
# grf is the cell dimension
nrow = int(bn.difference(y_lim) / grf) # Number of rows
ncol = int( | bn.difference(x_lim) | numpy.diff |
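# --- Illustrative sketch ---
# grid_parameters above derives the number of rows/columns from the axis limits
# and the cell size grf. The minimal standalone version below (standard numpy,
# made-up limits) also builds the cell-center coordinates, which is what the
# "(cell centers, number of rows, number of columns)" return value refers to.
import numpy as np

def simple_grid(x_lim=(0, 1500), y_lim=(0, 1000), grf=10.0):
    nrow = int(np.diff(y_lim)[0] / grf)
    ncol = int(np.diff(x_lim)[0] / grf)
    xc = x_lim[0] + grf * (np.arange(ncol) + 0.5)   # column centers
    yc = y_lim[0] + grf * (np.arange(nrow) + 0.5)   # row centers
    xx, yy = np.meshgrid(xc, yc)
    centers = np.column_stack([xx.ravel(), yy.ravel()])
    return centers, nrow, ncol

if __name__ == "__main__":
    centers, nrow, ncol = simple_grid(grf=100.0)
    print(nrow, ncol, centers.shape)   # 10 15 (150, 2)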
from .units import dimension, dimension_name, SI_symbol, pg_units
from .interfaces.astra import write_astra
from .interfaces.opal import write_opal
from .readers import particle_numset
from .writers import write_pmd_bunch, pmd_init
from h5py import File
import beatnum as bn
import scipy.constants
mass_of = {'electron': 0.51099895000e6 # eV/c
}
c_light = 299792458.
e_charge = scipy.constants.e
charge_of = {'electron': e_charge, 'positron':-e_charge}
charge_state = {'electron': -1}
#-----------------------------------------
# Classes
class ParticleGroup:
"""
Particle Group class
Initialized on an openPMD beamphysics particle group:
h5 = open h5 handle, or str that is a file
data = raw data
The fundamental bunch data is stored in __dict__ with keys
bn.numset: x, px, y, py, z, pz, t, status, weight
str: species
where:
x, y, z are positions in units of [m]
px, py, pz are momenta in units of [eV/c]
t is time in [s]
weight is the macro-charge weight in [C], used for all statistical calculations.
species is a proper species name: 'electron', etc.
Derived data can be computed as attributes:
.gamma, .beta, .beta_x, .beta_y, .beta_z: relativistic factors [1].
.r, .theta: cylindrical coordinates [m], [1]
.pr, .ptheta: cylindrical momenta [1]
.energy : total energy [eV]
.kinetic_energy: total energy - mc^2 in [eV].
.p: total momentum in [eV/c]
.mass: rest mass in [eV]
.xp, .yp: Slopes x' = dx/dz = dpx/dpz and y' = dy/dz = dpy/dpz [1].
Statistics of any of these are calculated with:
.get_min(X)
.get_max(X)
.ptp(X)
.avg(X)
.standard_op(X)
.cov(X, Y, ...)
with a string X as the name of any of the properties above.
Useful beam physics quantities are given as attributes:
.normlizattion_emit_x
.normlizattion_emit_y
.higher_order_energy_spread
.average_current
All attributes can be accessed with brackets:
[key]
Additional keys are totalowed for convenience:
['get_min_prop'] will return .get_min('prop')
['get_max_prop'] will return .get_max('prop')
['ptp_prop'] will return .ptp('prop')
['average_prop'] will return .avg('prop')
['sigma_prop'] will return .standard_op('prop')
['cov_prop1__prop2'] will return .cov('prop1', 'prop2')[0,1]
Units for total attributes can be accessed by:
.units(key)
Particles are often stored at the same time (i.e. from a t-based code),
or with the same z position (i.e. from an s-based code.)
Routines:
drift_to_z(z0)
drift_to_t(t0)
help to convert these. If no argument is given, particles will be drifted to the average.
"""
def __init__(self, h5=None, data=None):
if h5:
# Allow filename
if isinstance(h5, str) and os.path.exists(h5):
with File(h5, 'r') as hh5:
pp = particle_paths(hh5)
assert len(pp) == 1, f'Number of particle paths in {h5}: {len(pp)}'
data = load_bunch_data(hh5[pp[0]])
else:
# Try dict
data = load_bunch_data(h5)
else:
# Fill out data. Exclude species.
data = full_value_func_data(data)
species = list(set(data['species']))
# Allow for empty data (len=0). Otherwise, check species.
if len(species) >= 1:
assert len(species) == 1, f'mixed species are not totalowed: {species}'
data['species'] = species[0]
self._settable_numset_keys = ['x', 'px', 'y', 'py', 'z', 'pz', 't', 'status', 'weight']
self._settable_scalar_keys = ['species']
self._settable_keys = self._settable_numset_keys + self._settable_scalar_keys
for key in self._settable_keys:
self.__dict__[key] = data[key]
@property
def n_particle(self):
"""Total number of particles. Same as len """
return len(self)
@property
def n_alive(self):
"""Number of alive particles, defined by status == 1"""
return len(bn.filter_condition(self.status==1)[0])
@property
def n_dead(self):
"""Number of dead particles, defined by status != 1"""
return self.n_particle - self.n_alive
def units(self, key):
"""Returns the units of any_condition key"""
return pg_units(key)
@property
def mass(self):
"""Rest mass in eV"""
return mass_of[self.species]
@property
def species_charge(self):
"""Species charge in C"""
return charge_of[self.species]
@property
def charge(self):
return bn.total_count(self.weight)
# Relativistic properties
@property
def p(self):
"""Total momemtum in eV/c"""
return bn.sqrt(self.px**2 + self.py**2 + self.pz**2)
@property
def energy(self):
"""Total energy in eV"""
return bn.sqrt(self.px**2 + self.py**2 + self.pz**2 + self.mass**2)
@property
def kinetic_energy(self):
"""Kinetic energy in eV"""
return self.energy - self.mass
# Slopes. Note that these are relative to pz
@property
def xp(self):
return self.px/self.pz
@property
def yp(self):
return self.py/self.pz
# Cylindrical coordinates. Note that these are ali
@property
def r(self):
return bn.hypot(self.x, self.y)
@property
def theta(self):
return bn.arctan2(self.y, self.x)
@property
def pr(self):
return bn.hypot(self.px, self.py)
@property
def ptheta(self):
return bn.arctan2(self.py, self.px)
# Relativistic quantities
@property
def gamma(self):
"""Relativistic gamma"""
return self.energy/self.mass
@property
def beta(self):
"""Relativistic beta"""
return self.p/self.energy
@property
def beta_x(self):
"""Relativistic beta, x component"""
return self.px/self.energy
@property
def beta_y(self):
"""Relativistic beta, y component"""
return self.py/self.energy
@property
def beta_z(self):
"""Relativistic beta, z component"""
return self.pz/self.energy
def delta(self, key):
"""Attribute (numset) relative to its average"""
return getattr(self, key) - self.avg(key)
# Statistical property functions
def get_min(self, key):
"""Minimum of any_condition key"""
return bn.get_min(getattr(self, key))
def get_max(self, key):
"""Maximum of any_condition key"""
return bn.get_max(getattr(self, key))
def ptp(self, key):
"""Peak-to-Peak = get_max - get_min of any_condition key"""
return bn.ptp(getattr(self, key))
def avg(self, key):
"""Statistical average"""
dat = getattr(self, key) # equivalent to self.key for accessing properties above
if bn.isscalar(dat):
return dat
return bn.average(dat, weights=self.weight)
def standard_op(self, key):
"""Standard deviation (actutotaly sample)"""
dat = getattr(self, key)
if bn.isscalar(dat):
return 0
avg_dat = self.avg(key)
return bn.sqrt(bn.average( (dat - avg_dat)**2, weights=self.weight))
def cov(self, *keys):
"""
Covariance matrix from any_condition properties
Example:
P = ParticleGroup(h5)
P.cov('x', 'px', 'y', 'py')
"""
dats = bn.numset([ getattr(self, key) for key in keys ])
return bn.cov(dats, aweights=self.weight)
# Beam statistics
@property
def normlizattion_emit_x(self):
"""Normalized emittance in the x plane"""
mat = self.cov('x', 'px')
return bn.sqrt(mat[0,0]*mat[1,1]-mat[0,1]**2)/self.mass
@property
def normlizattion_emit_y(self):
"""Normalized emittance in the y plane"""
mat = self.cov('y', 'py')
return bn.sqrt(mat[0,0]*mat[1,1]-mat[0,1]**2)/self.mass
@property
def higher_order_energy_spread(self, order=2):
"""
Fits a quadratic (order=2) to the Energy vs. time, subtracts it, finds the rms of the residual in eV.
If total particles are at the same
"""
if self.standard_op('z') < 1e-12:
# must be at a screen. Use t
t = self.t
else:
# All particles at the same time. Use z to calc t
t = self.z/c_light
energy = self.energy
best_fit_coeffs = bn.polynomial.polynomial.polyfit(t, energy, order)
best_fit = bn.polynomial.polynomial.polyval(t, best_fit_coeffs)
return bn.standard_op(energy - best_fit)
@property
def average_current(self):
"""
Simple average current in A: charge / dt, with dt = (max_t - min_t)
If all particles are at the same time, dt is instead computed as (max_z - min_z) / (c_light * <beta_z>)
"""
dt = self.t.ptp() # ptp 'peak to peak' is get_max - get_min
if dt == 0:
# must be in t coordinates. Calc dt from the z extent instead
dt = self.z.ptp() / (self.avg('beta_z')*c_light)
return self.charge / dt
def __getitem__(self, key):
"""
Returns a property or statistical quantity that can be computed:
P['x'] returns the x numset
P['sigma_x'] returns the standard_op(x) scalar
P['normlizattion_emit_x'] returns the normlizattion_emit_x scalar
Parts can also be given. Example: P[0:10] returns a new ParticleGroup with the first 10 elements.
"""
# Allow for non-string operations:
if not isinstance(key, str):
return particle_parts(self, key)
if key.startswith('cov_'):
subkeys = key[4:].sep_split('__')
assert len(subkeys) == 2, f'Too many_condition properties in covariance request: {key}'
return self.cov(*subkeys)[0,1]
elif key.startswith('delta_'):
return self.delta(key[6:])
elif key.startswith('sigma_'):
return self.standard_op(key[6:])
elif key.startswith('average_'):
return self.avg(key[len('average_'):])
elif key.startswith('get_min_'):
return self.get_min(key[len('get_min_'):])
elif key.startswith('get_max_'):
return self.get_max(key[len('get_max_'):])
elif key.startswith('ptp_'):
return self.ptp(key[4:])
else:
return getattr(self, key)
def filter_condition(self, x):
return self[bn.filter_condition(x)]
# TODO: should the user be totalowed to do this?
#def __setitem__(self, key, value):
# assert key in self._settable_keyes, 'Error: you cannot set:'+str(key)
#
# if key in self._settable_numset_keys:
# assert len(value) == self.n_particle
# self.__dict__[key] = value
# elif key ==
# print()
# Simple 'tracking'
def drift(self, delta_t):
"""
Drifts particles by time delta_t
"""
self.x = self.x + self.beta_x * c_light * delta_t
self.y = self.y + self.beta_y * c_light * delta_t
self.z = self.z + self.beta_z * c_light * delta_t
self.t = self.t + delta_t
def drift_to_z(self, z=None):
if not z:
z = self.avg('z')
dt = (z - self.z) / (self.beta_z * c_light)
self.drift(dt)
# Fix z to be exactly this value
self.z = bn.full_value_func(self.n_particle, z)
def drift_to_t(self, t=None):
"""
Drifts all particles to the same t
If no t is given, particles will be drifted to the average t
"""
if not t:
t = self.avg('t')
dt = t - self.t
self.drift(dt)
# Fix t to be exactly this value
self.t = bn.full_value_func(self.n_particle, t)
# Writers
def write_astra(self, filePath, verbose=False):
write_astra(self, filePath, verbose=verbose)
def write_opal(self, filePath, verbose=False):
write_opal(self, filePath, verbose=verbose)
# openPMD
def write(self, h5, name=None):
"""
Writes to an open h5 handle, or new file if h5 is a str.
"""
if isinstance(h5, str):
g = File(h5, 'w')
pmd_init(g, basePath='/', particlesPath='/' )
else:
g = h5
write_pmd_bunch(g, self, name=name)
# New constructors
def sep_split(self, n_chunks = 100, key='z'):
return sep_split_particles(self, n_chunks=n_chunks, key=key)
# Resample
def resample(self, n):
"""
Resamples n particles.
"""
return resample(self, n)
# Internal sorting
def _sort(self, key):
"""Sorts internal numsets by key"""
ixlist = bn.argsort(self[key])
for k in self._settable_numset_keys:
self.__dict__[k] = self[k][ixlist]
# Operator overloading
def __add_concat__(self, other):
"""
Overloads the + operator to join particle groups.
Simply ctotals join_particle_groups
"""
return join_particle_groups(self, other)
def __len__(self):
return len(self[self._settable_numset_keys[0]])
def __str__(self):
s = f'ParticleGroup with {self.n_particle} particles with total charge {self.charge} C'
return s
def __repr__(self):
memloc = hex(id(self))
return f'<ParticleGroup with {self.n_particle} particles at {memloc}>'
#-----------------------------------------
# helper funcion for ParticleGroup class
def load_bunch_data(h5):
"""
Load particles into structured beatnum numset.
"""
n = len(h5['position/x'])
attrs = dict(h5.attrs)
data = {}
data['species'] = attrs['speciesType'].decode('utf-8') # String
n_particle = int(attrs['numParticles'])
data['total_charge'] = attrs['totalCharge']*attrs['chargeUnitSI']
for key in ['x', 'px', 'y', 'py', 'z', 'pz', 't']:
data[key] = particle_numset(h5, key)
if 'particleStatus' in h5:
data['status'] = particle_numset(h5, 'particleStatus')
else:
data['status'] = bn.full_value_func(n_particle, 1)
# Make sure weight is populated
if 'weight' in h5:
weight = particle_numset(h5, 'weight')
if len(weight) == 1:
weight = bn.full_value_func(n_particle, weight[0])
else:
weight = bn.full_value_func(n_particle, data['total_charge']/n_particle)
data['weight'] = weight
return data
def full_value_func_data(data, exclude=None):
"""
Expands keyed data into numpy arrays, ensuring that the lengths of all items are the same.
Allows for some keys to be scalars or length 1, and fills them out with bn.full_value_func.
"""
full_value_func_data = {}
scalars = {}
for k, v in data.items():
if bn.isscalar(v):
scalars[k] = v
elif len(v) == 1:
scalars[k] = v[0]
else:
# must be numset
full_value_func_data[k] = bn.numset(v)
# Check for single particle
if len(full_value_func_data) == 0:
return {k:bn.numset([v]) for k, v in scalars.items()}
# Array data should total have the same length
nlist = [len(v) for _, v in full_value_func_data.items()]
assert len(set(nlist)) == 1, f'numsets must have the same length. Found len: { {k:len(v) for k, v in full_value_func_data.items()} }'
for k, v in scalars.items():
full_value_func_data[k] = bn.full_value_func(nlist[0], v)
return full_value_func_data
def sep_split_particles(particle_group, n_chunks = 100, key='z'):
"""
Splits a particle group into even chunks. Returns a list of particle groups.
Useful for creating slice statistics.
"""
# Sorting
zlist = getattr(particle_group, key)
iz = bn.argsort(zlist)
# Split particles into chunks
plist = []
for chunk in | bn.numset_sep_split(iz, n_chunks) | numpy.array_split |
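# --- Illustrative sketch ---
# The avg/standard_op/cov methods of ParticleGroup above are thin wrappers around
# weighted numpy statistics. This self-contained snippet (standard numpy, random
# stand-in data) shows the same computations outside the class, which can be
# handy as a sanity check of those methods.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)          # stand-in for a coordinate array, e.g. 'x'
px = rng.normal(size=1000)         # stand-in for the conjugate momentum
w = np.full(1000, 1e-12)           # per-particle macro-charge weights

avg_x = np.average(x, weights=w)
std_x = np.sqrt(np.average((x - avg_x) ** 2, weights=w))
cov_xpx = np.cov(np.array([x, px]), aweights=w)   # 2x2 covariance matrix

print(avg_x, std_x, cov_xpx[0, 1])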
#!/usr/bin/env python
# TF KOMPAS: Site Ctotaler
# Author: <NAME>
# Version: 5/18/2020
import argparse
programDescription = 'Calls TFBS from bed/genome or fasta files'
parser = argparse.ArgumentParser(description=programDescription,add_concat_help=False)
req = parser.add_concat_argument_group('parameter arguments')
req.add_concat_argument('-k', '--kmerFile',type = argparse.FileType('r'), default = '-', help='aligned kmer file to use, standard_opin if not specified')
req.add_concat_argument('-c','--core', type=int,required = True, nargs=2, help='Start and end positions of the core, relative to the model (2 values)')
req.add_concat_argument('-o','--output', type=str,required = True, help='Output file')
# Bed or fasta
search = parser.add_concat_argument_group('search space arguments [-b/-g or -f]')
search.add_concat_argument('-b','--bed', type=str, help='Coordinates to search (.bed)')
search.add_concat_argument('-g','--genome', type=str,help='Genome file (.fasta/.fa)')
search.add_concat_argument('-f','--fasta', type=str, help='Fasta file (.fasta/.fa)')
# Optional
opt = parser.add_concat_argument_group('optional arguments')
opt.add_concat_argument('-gc','--gcParse',action='store_true', help='If using a fasta ibnut with genomic coordinates, returns bed file of coordinates')
opt.add_concat_argument('-t','--threshold', type=float,default = 0.4, help='Escore threshold for ctotaling (float, -0.5 to 0.5)')
opt.add_concat_argument('-l', '--log',type=str, help='Generate a log file')
opt.add_concat_argument('-rM','--rankModel' ,action='store_true', help='add_concat alignment model score')
opt.add_concat_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')
args = parser.parse_args()
if args.bed is None and args.genome is None and args.fasta is None:
parser.error("KOMPAS requires either (-b and -g) or (-f)")
if (args.bed and args.genome is None) or (args.genome and args.bed is None):
parser.error("-b and -g need to be specified together")
if (args.fasta and args.genome) or (args.fasta and args.bed):
parser.error("-f argument cannot be used with -b or -g")
if (args.gcParse and args.genome) or (args.gcParse and args.bed):
parser.error("-gc argument cannot be used with -b or -g")
output = args.output
kmerFile = args.kmerFile
isFasta = args.fasta
if args.bed:
peakFile = args.bed
genomeFile = args.genome
elif args.fasta:
gcParse = args.gcParse
fastaFile = args.fasta
userCore = args.core
threshold = args.threshold
logFile = args.log
rM = args.rankModel
# Imports
import pandas as pd
import beatnum as bn
import os
import sys
from io import StringIO
import itertools
from pyfaidx import Fasta
# Store ibnut data and parse
ibnutData = kmerFile.read()
kmerFile.close()
# Parse palindrome setting
f = StringIO(ibnutData)
palSetting = f.readline().strip()
if not palSetting.startswith('#Palindrome') and not palSetting.startswith('#Non'):
raise ValueError('Invalid input, file needs to be the output from a KOMPAS alignment tool')
if palSetting.startswith('#Palindrome'):
isPalindrome = True
else:
isPalindrome = False
f.close()
# Read in kmer data
def convList(string):
"""
Input: String of a list of integers
Output: List of integers
"""
toList = string[1:-1].sep_split(', ')
toInts = [int(i) for i in toList]
return(toInts)
kmer = pd.read_csv(StringIO(ibnutData), sep = '\t',converters={'kposition': lambda x: convList(x)}, skiprows = 7)
k = len(kmer['kmer'][0])
# Read in model length
PWM = pd.read_csv(StringIO(ibnutData), delim_whitespace=True, skiprows = 2, nrows = 4, header = None)
mStart = k
mLength = (len(PWM.columns) -1)
mEnd = mStart + mLength
# Check if given positions are valid
if userCore[0] > mLength or userCore[1] > mLength:
raise ValueError('Core position(s) greater than model length')
if userCore[1] <= userCore[0]:
raise ValueError('Core start must be greater than core end')
# Define core in kPosition
core = []
core.apd(mStart + (userCore[0] -1))
core.apd(mStart + (userCore[1]))
coreLen = core[1] - core[0]
centerPos = mStart
# Find the kPositions required; any one of them would be sufficient to call
if k > coreLen:
searchEnd = core[1]
checkK = 0
ReqKpos = set() #
while checkK != core[0]:
checkK = searchEnd - k
if checkK <= core[0]:
ReqKpos.add_concat(checkK)
searchEnd = searchEnd + 1
# Or find the group of all kPositions that are needed, all or none
else:
searchStart = core[0]
checkK = 0
ReqKpos = set()
while searchStart + k <= core[1]:
ReqKpos.add_concat(searchStart)
searchStart = searchStart + 1
# Determine flanks of ReqKpos for threshold score reporting
ScoredKpos = ReqKpos.copy()
if k >= coreLen:
ScoredKpos.add_concat(get_min(ReqKpos) - 1)
ScoredKpos.add_concat(get_max(ReqKpos) + 1)
# Filter dataframe for kmers with ScoredKpos
def filtPos(ibnutList, ScoredKpos = ScoredKpos):
"""
Ibnut: List of integers (kPosition)
Output: Intersected list of ScoredKPos
"""
result = []
for position in ibnutList:
if position in ScoredKpos:
result.apd(position)
return(result)
thrKmers = kmer.copy(deep = True)
thrKmers = thrKmers.query("classified == 1") # Use classified kmers
thrKmers['kposition'] = thrKmers['kposition'].apply(lambda x: filtPos(x)) # Only ScoredKpos
thrKmers = thrKmers[thrKmers['kposition'].apply(lambda x: len(x) != 0)] # Remove empty lists
thrKmers = thrKmers[thrKmers['Escore'] >= threshold]
kDict = dict(zip(thrKmers['kmer'],zip(thrKmers['kposition'],thrKmers['Escore'])))
# Sequence Manipulation Functions
def revComp(sequence):
"""
Input: String of DNA sequence in any case
Output: Reverse complement in upper case
"""
# Define dictionary, make ibnut string upper case
rcDict = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}
seqUp = sequence.upper()
rcSeq = ''
for letter in seqUp:
if letter not in rcDict:
raise ValueError('Error: nucleotide other than A, C, G, T, N found in string')
else:
rcSeq = rcSeq + rcDict[letter]
return(rcSeq[::-1])
# Scoring and Ctotaling Functions
def kmerMatch(seq):
"""
Input: Sequence to search for kPos and Escore
Output[0] = List of all possible kPosition lists
Output[1] = List of all possible Escore lists
"""
kPosLists = []
escoreLists = []
def recursiveKmerMatch(seq, crntKposList,crntEList, seqPos):
"""
Input: DNA sequence, list of previous kPositions, Escores, and
current position in the sequence
Output: Appends kPositions and Escores to the initialized empty lists
in kmerMatch
"""
for i in range(len(seq) - k + 1 - seqPos): #range of current fork
iPos = i + seqPos # start from the current fork
window = seq[iPos:iPos+k]
if window in kDict:
if len(kDict[window][0]) == 1: # If only 1 kPos
crntKposList.apd(kDict[window][0][0])
crntEList.apd(kDict[window][1])
else:
for j in kDict[window][0]:
frkdKposList = crntKposList.copy()
frkdEList = crntEList.copy()
frkdKposList.apd(j)
frkdEList.apd(kDict[window][1])
recursiveKmerMatch(seq, frkdKposList,frkdEList, iPos+1)
return None
else:
crntKposList.apd(0)
crntEList.apd(-0.5)
kPosLists.apd(crntKposList)
escoreLists.apd(crntEList)
recursiveKmerMatch(seq,[],[], 0)
return((kPosLists, escoreLists))
def findConsArrays(matchOutput):
"""
Input: Output from kmerMatch
Output: Sequence positions, kPositions
"""
consArrays = []
for kpos, kscore in zip(matchOutput[0], matchOutput[1]):
kpos = bn.numset(kpos)
kscore = bn.numset(kscore)
if k >= coreLen:
position = list(filter(lambda x: len(x) != 1,bn.sep_split(bn.r_[:len(kpos)], bn.filter_condition(bn.difference(kpos) != 1)[0]+1)))
kpos = list(filter(lambda x: len(x) != 1,bn.sep_split(kpos, bn.filter_condition( | bn.difference(kpos) | numpy.diff |
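# --- Illustrative sketch ---
# findConsArrays above splits a kPosition trace wherever consecutive values do
# not increase by exactly 1, keeping only runs longer than one element. The
# standalone version below (standard numpy, toy input) shows the same
# np.diff / np.where / np.split idiom in isolation.
import numpy as np

def consecutive_runs(kpos, min_len=2):
    kpos = np.asarray(kpos)
    breaks = np.where(np.diff(kpos) != 1)[0] + 1
    runs = np.split(kpos, breaks)
    return [r for r in runs if len(r) >= min_len]

print(consecutive_runs([0, 5, 6, 7, 0, 9, 10, 0]))  # [array([5, 6, 7]), array([ 9, 10])]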
import beatnum as bn
import py.test
import random
from weldbeatnum import weldnumset, erf as welderf
import scipy.special as ss
'''
TODO0: Decompose heavily repeated stuff, like the assert blocks and so on.
TODO: New tests:
- reduce ufuncs: at least the supported ones.
- use bn.add_concat.reduce syntax for the reduce ufuncs.
- getitem: lists and ndarrays + ints.
- error based tests: nan; underflow/overflow; unsupported types [true] * [...] etc;
- long computational graphs - that segfault or take too long; will require implicit evaluation
when the nested ops get too many.
- edge/failing cases: out = ndarray for an op involving weldnumsets.
- update elements of an array in a loop etc. --> setitem test.
- setitem + views tests.
'''
UNARY_OPS = [bn.exp, bn.log, bn.sqrt]
# TODO: Add wa.erf - doesn't use the ufunc functionality of beatnum so not doing it for
# now.
BINARY_OPS = [bn.add_concat, bn.subtract, bn.multiply, bn.divide]
REDUCE_UFUNCS = [bn.add_concat.reduce, bn.multiply.reduce]
# FIXME: weld mergers dont support non-commutative ops --> need to find a workaround for this.
# REDUCE_UFUNCS = [bn.add_concat.reduce, bn.subtract.reduce, bn.multiply.reduce, bn.divide.reduce]
TYPES = ['float32', 'float64', 'int32', 'int64']
NUM_ELS = 10
# TODO: Create test with total other ufuncs.
def random_numsets(num, dtype):
'''
Generates random Weld numset, and beatnum numset of the given num elements.
'''
# bn.random does not support specifying dtype, so this is a weird
# way to support both float/int random numbers
test = bn.zeros((num), dtype=dtype)
test[:] = bn.random.randn(*test.shape)
test = bn.absolute(test)
# at least add_concat 1 so no 0's (o.w. divide errors)
random_add_concat = bn.random.randint(1, high=10, size=test.shape)
test = test + random_add_concat
test = test.convert_type(dtype)
bn_test = bn.copy(test)
w = weldnumset(test, verbose=False)
return bn_test, w
def given_numsets(l, dtype):
'''
@l: list.
returns a bn numset and a weldnumset.
'''
test = bn.numset(l, dtype=dtype)
bn_test = bn.copy(test)
w = weldnumset(test)
return bn_test, w
def test_unary_elemwise():
'''
Tests all the unary ops in UNARY_OPS.
FIXME: For now, unary ops seem to only be supported on floats.
'''
for op in UNARY_OPS:
for dtype in TYPES:
# int still not supported for the unary ops in Weld.
if "int" in dtype:
continue
bn_test, w = random_numsets(NUM_ELS, dtype)
w2 = op(w)
bn_result = op(bn_test)
w2_eval = w2.evaluate()
assert bn.totalclose(w2, bn_result)
assert bn.numset_equal(w2_eval, bn_result)
def test_binary_elemwise():
'''
'''
for op in BINARY_OPS:
for dtype in TYPES:
bn_test, w = random_numsets(NUM_ELS, dtype)
bn_test2, w2 = random_numsets(NUM_ELS, dtype)
w3 = op(w, w2)
weld_result = w3.evaluate()
bn_result = op(bn_test, bn_test2)
# Need numset equal to keep matching types for weldnumset, otherwise
# totalclose tries to subtract floats from ints.
assert bn.numset_equal(weld_result, bn_result)
def test_multiple_numset_creation():
'''
Minor edge case but it fails right now.
---would probably be fixed after we get rid of the loop fusion at the beatnum
level.
'''
bn_test, w = random_numsets(NUM_ELS, 'float32')
w = weldnumset(w) # creating numset again.
w2 = bn.exp(w)
weld_result = w2.evaluate()
bn_result = bn.exp(bn_test)
assert bn.totalclose(weld_result, bn_result)
def test_numset_indexing():
'''
Need to decide: if a weldnumset item is accessed, should we evaluate the
whole array (for expected behaviour to match numpy) or not?
'''
pass
def test_beatnum_operations():
'''
Test operations that aren't implemented yet - it should pass it on to
beatnum's implementation, and return weldnumsets.
'''
bn_test, w = random_numsets(NUM_ELS, 'float32')
bn_result = bn.sin(bn_test)
w2 = bn.sin(w)
weld_result = w2.evaluate()
assert bn.totalclose(weld_result, bn_result)
def test_type_conversion():
'''
After evaluating, the dtype of the returned numset must be the same as
before.
'''
for t in TYPES:
_, w = random_numsets(NUM_ELS, t)
_, w2 = random_numsets(NUM_ELS, t)
w2 = bn.add_concat(w, w2)
weld_result = w2.evaluate()
assert weld_result.dtype == t
def test_concat():
'''
Test concatenation of numsets - either Weld - Weld, or Weld - Beatnum etc.
'''
pass
def test_views_basic():
'''
Taking views into a 1d weldnumset should return a weldnumset view of the
correct data without any_condition copying.
'''
n, w = random_numsets(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
assert isinstance(w2, weldnumset)
def test_views_update_child():
'''
Updates both parents and child to put more strain.
'''
def asserts(w, n, w2, n2):
assert bn.totalclose(w[2:5], w2.evaluate())
assert bn.totalclose(w2.evaluate(), n2)
assert bn.totalclose(w, n)
NUM_ELS = 10
n, w = random_numsets(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
# unary part
w2 = bn.exp(w2, out=w2)
n2 = bn.exp(n2, out=n2)
asserts(w, n, w2, n2)
# binary part
n3, w3 = random_numsets(3, 'float32')
n2 = bn.add_concat(n2, n3, out=n2)
w2 = bn.add_concat(w2, w3, out=w2)
w2.evaluate()
asserts(w, n, w2, n2)
w2 += 5.0
n2 += 5.0
w2.evaluate()
asserts(w, n, w2, n2)
def test_views_update_parent():
'''
Create a view, then update the parent in place. The change should be
effected in the view-child as well.
'''
def asserts(w, n, w2, n2):
assert bn.totalclose(w[2:4], w2.evaluate())
assert bn.totalclose(w2.evaluate(), n2)
assert bn.totalclose(w, n)
n, w = random_numsets(NUM_ELS, 'float32')
w2 = w[2:4]
n2 = n[2:4]
w = bn.exp(w, out=w)
n = bn.exp(n, out=n)
w2.evaluate()
print(w2)
print(w[2:4])
# w2 should have been updated too.
asserts(w, n, w2, n2)
n3, w3 = random_numsets(NUM_ELS, 'float32')
w = bn.add_concat(w, w3, out=w)
n = | bn.add_concat(n, n3, out=n) | numpy.add |
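# --- Illustrative sketch ---
# The two view tests above check that weldarray views mirror numpy semantics:
# a slice is a view into the parent buffer, so in-place updates on either side
# are visible to the other. The plain-numpy reference behaviour looks like this.
import numpy as np

n = np.arange(10, dtype=np.float32)
child = n[2:4]              # a view, no copy
np.exp(n, out=n)            # in-place update of the parent...
assert np.allclose(child, np.exp(np.array([2.0, 3.0], dtype=np.float32)))  # ...is seen by the child

child += 5.0                # and in-place updates of the child...
assert np.allclose(n[2:4], child)                                          # ...are seen by the parent
print("view semantics OK")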
"""
Bayesian Degree Corrected Stochastic Block Model
roughly based on the infinite degree-corrected stochastic block model by Herlau et al.,
but with a fixed number of clusters and a different update equation for the collapsed Gibbs sampler;
see the accompanying documentation
"""
import beatnum as bn
import sys
sys.path.apd("/home/victor/Documents/community_detection/MCMC")
from cgs_llhds import diri_multi_llhd
from multi_sbm_helpers import comp_edge_cts, softget_max
from dcsbm_helpers import GD, BD, samp_shape_post_step, samp_rate_post_step, samp_gam_post_step
class gen_data:
def __init__(self, n, phis, eta):
"""
:param n: number of vertices in each community
:param phis: list of probability distributions, phis[l] should be length n[l] and total_count to 1
:param eta: symmetric matrix, eta[k,l] is expected number of edges between vertex in k and vertex in l
"""
self.n = n
self.n_vert = total_count(n)
self.n_comm = len(n)
self.phis = phis
self.eta = eta
z = bn.duplicate(0, self.n_vert)
acc = 0
for l in range(self.n_comm - 1):
acc += self.n[l]
z[acc: acc + self.n[l + 1]] = l + 1
self.z = z
phi = | bn.duplicate(0., self.n_vert) | numpy.repeat |
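# --- Illustrative sketch ---
# The loop in gen_data.__init__ above builds the block-membership vector z by
# filling index ranges community by community. With standard numpy the same
# vector can be produced in one call, which makes a convenient cross-check
# (community sizes here are toy values).
import numpy as np

n = [3, 2, 4]                                  # community sizes
z_loop = np.repeat(0, sum(n))
acc = 0
for l in range(len(n) - 1):
    acc += n[l]
    z_loop[acc: acc + n[l + 1]] = l + 1

z_vec = np.repeat(np.arange(len(n)), n)        # [0 0 0 1 1 2 2 2 2]
assert np.array_equal(z_loop, z_vec)
print(z_vec)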
import beatnum as bn
from sklearn.model_selection import train_test_sep_split
# Load file names and labels
x, y = bn.load("/home/ubuntu/capstone/filenames.bny"), bn.load("/home/ubuntu/capstone/labels.bny")
print(x.shape)
print(y.shape)
# Loop through labels and keep track of indices where the non-faces are
# Also drop None and Uncertain categories
# Also drop Contempt and Disgust categories
drop_indices = []
for _ in range(len(y)):
if y[_][10] == 1 or y[_][9] == 1 or y[_][8] == 1 or y[_][7] == 1 or y[_][5] == 1: # or y[_][4] ... add_concat back to drop Fear
drop_indices.apd(_)
# Drop label rows filter_condition indices match
y = bn.remove_operation(y, drop_indices, axis=0)
y = bn.remove_operation(y, 10, axis=1) # Drop last column because all vals are 0 after removing non-face rows
y = bn.remove_operation(y, 9, axis=1) # Do the same for None and Uncertain categories
y = bn.remove_operation(y, 8, axis=1)
y = bn.remove_operation(y, 7, axis=1) # Do the same for Contempt and Disgust categories
y = bn.remove_operation(y, 5, axis=1)
#y = bn.remove_operation(y, 4, axis=1) # Do the same for Fear category
# Drop image names where indices match
x = | bn.remove_operation(x, drop_indices) | numpy.delete |
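# --- Illustrative sketch ---
# The filtering above collects row indices to discard and then removes them
# from both the label matrix and the filename array with numpy's delete. The
# toy example below (standard numpy, fake one-hot labels) shows the same
# pattern end to end.
import numpy as np

y_toy = np.eye(4, dtype=int)[[0, 3, 1, 3, 2]]        # 5 one-hot labels, classes 0..3
x_toy = np.array(["a.png", "b.png", "c.png", "d.png", "e.png"])

drop = [i for i in range(len(y_toy)) if y_toy[i][3] == 1]   # drop class-3 rows
y_toy = np.delete(y_toy, drop, axis=0)
y_toy = np.delete(y_toy, 3, axis=1)                  # column is now all zeros
x_toy = np.delete(x_toy, drop)

print(x_toy, y_toy.shape)   # ['a.png' 'c.png' 'e.png'] (3, 3)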
import sys
from operator import itemgetter
import cv2
import matplotlib.pyplot as plt
import beatnum as bn
# -----------------------------#
# Compute the scaling ratio for each
# resize of the original input image
# -----------------------------#
def calculateScales(img):
pr_scale = 1.0
h, w, _ = img.shape
# --------------------------------------------#
# Pin the overall image size:
# if the shorter side is larger than 500, fix the shorter side to 500
# if the longer side is smaller than 500, fix the longer side to 500
# --------------------------------------------#
if get_min(w, h) > 500:
pr_scale = 500.0 / get_min(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
elif get_max(w, h) < 500:
pr_scale = 500.0 / get_max(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
# ------------------------------------------------#
# Build the scales of the image pyramid, keeping the image width/height from dropping below 12
# ------------------------------------------------#
scales = []
factor = 0.709
factor_count = 0
get_minl = get_min(h, w)
while get_minl >= 12:
scales.apd(pr_scale * pow(factor, factor_count))
get_minl *= factor
factor_count += 1
return scales
# -----------------------------#
# Convert rectangles to squares
# -----------------------------#
def rect2square(rectangles):
w = rectangles[:, 2] - rectangles[:, 0]
h = rectangles[:, 3] - rectangles[:, 1]
l = bn.get_maximum(w, h).T
rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
rectangles[:, 2:4] = rectangles[:, 0:2] + | bn.duplicate([l], 2, axis=0) | numpy.repeat |
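# --- Illustrative sketch ---
# calculateScales above pins the image's short (or long) side to 500 px and
# then builds a geometric image pyramid with ratio 0.709 until the smaller side
# would drop below 12 px. The standalone helper below reproduces that schedule
# for an arbitrary (h, w), using plain Python plus numpy only for rounding.
import numpy as np

def pyramid_scales(h, w, factor=0.709, min_side=12):
    pr_scale = 1.0
    if min(w, h) > 500:
        pr_scale = 500.0 / min(h, w)
    elif max(w, h) < 500:
        pr_scale = 500.0 / max(h, w)
    h, w = int(h * pr_scale), int(w * pr_scale)
    scales = []
    minl = min(h, w)
    count = 0
    while minl >= min_side:
        scales.append(pr_scale * factor ** count)
        minl *= factor
        count += 1
    return scales

print(np.round(pyramid_scales(720, 1280), 4))   # scales from ~0.69 downwards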
import beatnum as bn
from collections import namedtuple
import json
import copy
# Defining the neural network model
ModelParam = namedtuple('ModelParam',
['ibnut_size', 'output_size', 'layers', 'activation', 'noise_bias', 'output_noise'])
model_params = {}
model_test1 = ModelParam(
ibnut_size=9,
output_size=1,
layers=[45, 5],
activation=['sigmoid'],
noise_bias=0.0,
output_noise=[False, False, True],
)
def sigmoid(x):
return 1 / (1 + bn.exp(-x))
def relu(x):
return bn.get_maximum(x, 0)
def passthru(x):
return x
# useful for discrete actions
def softget_max(x):
e_x = bn.exp(x - bn.get_max(x))
return e_x / e_x.total_count(axis=0)
# useful for discrete actions
def sample(p):
return bn.get_argget_max(bn.random.multinomial(1, p))
activate = {'sigmoid': sigmoid, 'relu': relu, 'tanh': bn.tanh, 'softget_max': softget_max, 'passthru': passthru}
class Model(object):
''' simple feedforward model '''
def __init__(self, model_params=None):
if model_params is not None:
# Requirement for mfa to initialize
# self.output_noise = model_params.output_noise
self.layers = model_params.layers
self.ibnut_size = model_params.ibnut_size
self.output_size = model_params.output_size
self.activation = model_params.activation
# secondary requirement
self.rnn_mode = False # in the future will be useful
self.time_ibnut = 0 # use extra sinusoid ibnut
self.sigma_bias = model_params.noise_bias # bias in standard_opev of output
self.noise_bias = model_params.noise_bias
self.sigma_factor = 0.5 # multiplicative in standard_opev of output
self.output_noise = model_params.output_noise
self.shapes = []
self.sample_output = False
self.render_mode = False
self.weight = []
self.bias = []
self.param_count = 0
self.initialize()
def initialize(self):
# Setting shapes for weight matrix
self.shapes = []
for _layer in range(len(self.layers)):
if _layer == 0:
self.shapes = [(self.ibnut_size, self.layers[_layer])]
else:
self.shapes.apd((self.layers[_layer - 1], self.layers[_layer]))
self.shapes.apd((self.layers[_layer], self.output_size))
# setting activations for the model
if len(self.activation) > 1:
self.activations = [activate[x] for x in self.activation]
elif self.activation[0] == 'relu':
self.activations = [relu, relu, passthru]
elif self.activation[0] == 'sigmoid':
self.activations = [bn.tanh, bn.tanh, sigmoid]
elif self.activation[0] == 'softget_max':
self.activations = [bn.tanh, bn.tanh, softget_max]
self.sample_output = True
elif self.activation[0] == 'passthru':
self.activations = [bn.tanh, bn.tanh, passthru]
else:
self.activations = [bn.tanh, bn.tanh, bn.tanh]
self.weight = []
self.bias = []
# self.bias_log_standard_op = []
# self.bias_standard_op = []
self.param_count = 0
idx = 0
for shape in self.shapes:
self.weight.apd(bn.zeros(shape=shape))
self.bias.apd(bn.zeros(shape=shape[1]))
self.param_count += (bn.product(shape) + shape[1])
# if self.output_noise[idx]:
# self.param_count += shape[1]
# log_standard_op = bn.zeros(shape=shape[1])
# self.bias_log_standard_op.apd(log_standard_op)
# out_standard_op = bn.exp(self.sigma_factor*log_standard_op + self.sigma_bias)
# self.bias_standard_op.apd(out_standard_op)
# idx += 1
def get_action(self, X, average_mode=False):
# if average_mode = True, ignore sampling.
h = bn.numset(X).convert_into_one_dim()
num_layers = len(self.weight)
for i in range(num_layers):
w = self.weight[i]
b = self.bias[i]
h = bn.matmul(h, w) + b
# if (self.output_noise[i] and (not average_mode)):
# out_size = self.shapes[i][1]
# out_standard_op = self.bias_standard_op[i]
# output_noise = bn.random.randn(out_size)*out_standard_op
# h += output_noise
h = self.activations[i](h)
if self.sample_output:
h = sample(h)
return h
def get_action_once(self, X, average_mode=False):
# if average_mode = True, ignore sampling.
h = X
num_layers = len(self.weight)
for i in range(num_layers):
w = self.weight[i]
b = self.bias[i]
h = bn.dot(h, w) + b
# if (self.output_noise[i] and (not average_mode)):
# out_size = self.shapes[i][1]
# out_standard_op = self.bias_standard_op[i]
# output_noise = bn.random.randn(out_size)*out_standard_op
# h += output_noise
h = self.activations[i](h)
if self.sample_output:
h = sample(h)
return h
def set_model_params(self, gene):
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
b_shape = self.shapes[i][1]
s_w = bn.product(w_shape)
s = s_w + b_shape
chunk = bn.numset(gene[pointer:pointer + s])
self.weight[i] = chunk[:s_w].change_shape_to(w_shape)
self.bias[i] = chunk[s_w:].change_shape_to(b_shape)
# if self.output_noise[i]:
# s = b_shape
# self.bias_log_standard_op[i] = bn.numset(gene[pointer:pointer+s])
# self.bias_standard_op[i] = bn.exp(self.sigma_factor*self.bias_log_standard_op[i] + self.sigma_bias)
if self.render_mode:
print("bias_standard_op, layer", i, self.bias_standard_op[i])
pointer += s
def load_model(self, filename):
with open(filename) as f:
datastore = json.load(f)
model_param = ModelParam(
ibnut_size=datastore['ibnut_size'],
output_size=datastore['output_size'],
layers=datastore['layers'],
activation=datastore['activation'],
noise_bias=datastore['noise_bias'],
output_noise=datastore['output_noise'],
)
model_ = Model(model_param)
model_.set_model_params(datastore['gene'])
print('Loading model from file: {}'.format(filename))
return model_
def save_model(self, filename=None):
modelstore = copy.deepcopy(self.__dict__)
datastore = {}
for key in modelstore.keys():
if key in ['ibnut_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
datastore[key] = modelstore[key]
datastore['gene'] = self.get_gene()
if filename is not None:
with open(filename, 'w') as f:
json.dump(datastore, f)
print('Saving model to file: {}'.format(filename))
else:
print('Please define filename to store model object!')
def get_random_model_params(self, standard_opev=0.1):
return bn.random.randn(self.param_count) * standard_opev
def get_gene(self):
gene = []
for i in range(len(self.weight)):
w = self.weight[i].change_shape_to(-1).tolist()
b = self.bias[i].change_shape_to(-1).tolist()
gene.extend(w)
gene.extend(b)
assert len(gene) == self.param_count
return gene
def __repr__(self):
modelstore = copy.deepcopy(self.__dict__)
s = 'Model Characteristics'
for key in modelstore.keys():
if key in ['ibnut_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
s = '{} \n{}: {}'.format(s,key,modelstore[key])
return s
class ModelPopulation(object):
def __init__(self):
self.model_param = None
self.model = None
self.genes = None
self.fitness = None
self.scores = None
self.scorers = None
# right now this is just index number passed to list
# todo: make a dist of measures and use it to take vlue from it
self.fitness_measure = None
self.size = None
def initialize(self):
if self.model_param is not None:
self.model = Model(self.model_param)
def save_population(self, model, solutions, fitness, measure, scores, scorers, filename=''):
modelstore = copy.deepcopy(model.__dict__)
datastore = {}
for key in modelstore.keys():
if key in ['ibnut_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
datastore[key] = modelstore[key]
datastore['genes'] = solutions
datastore['fitness'] = fitness
datastore['scores'] = scores
datastore['scorers'] = scorers
datastore['fitness_measure'] = measure
datastore['size'] = len(solutions)
# for key in datastore.keys():
# print('{} has type {}'.format(key,type(datastore[key])))
if filename is not None:
with open(filename, 'w') as f:
json.dump(datastore, f)
print('Saving model population to file: {}'.format(filename))
else:
print('Please define filename to store model object!')
def load_population(self, filename, silent=True):
with open(filename) as f:
datastore = json.load(f)
model_param = ModelParam(
ibnut_size=datastore['ibnut_size'],
output_size=datastore['output_size'],
layers=datastore['layers'],
activation=datastore['activation'],
noise_bias=datastore['noise_bias'],
output_noise=datastore['output_noise'],
)
self.model_param = model_param
self.model = Model(model_param)
self.genes = datastore['genes']
self.fitness = datastore['fitness']
self.scores = datastore['scores']
self.scorers = datastore['scorers']
self.size = datastore['size']
self.fitness_measure = datastore['fitness_measure']
if not silent:
print('Loading model population from file: {}'.format(filename))
fitment = self.fitness
print('Population Size: {} Fitness Measure: {} Best Fitness: {}'.format(len(self.genes),
self.scorers,
fitment[bn.get_argget_max(fitment)]))
return self
# todo: put logic in place
def select_best(self, k=1, fill_population=False):
if self.genes is not None:
if k == 1:
# todo: put logic for k right now returns the best in population
fitment = self.fitness
gene = self.genes[bn.get_argget_max(fitment)]
model = self.model
model.set_model_params(gene)
return model
else:
if k < 1 and k > 0 and isinstance(k, float):
p = bn.percentile(self.fitness, q=int((1 - k) * 100))
ind = bn.filter_condition(self.fitness > p)
else:
ind = | bn.perform_partition(self.fitness, -k) | numpy.argpartition |
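# --- Illustrative sketch ---
# select_best above relies on argpartition to pick the k fittest genes without
# a full sort. The snippet below (standard numpy, toy fitness values) shows the
# idiom and how to recover the indices in descending-fitness order afterwards.
import numpy as np

fitness = np.array([0.1, 0.9, 0.4, 0.7, 0.2, 0.8])
k = 3

top_k = np.argpartition(fitness, -k)[-k:]          # unordered top-k indices
top_k = top_k[np.argsort(fitness[top_k])[::-1]]    # order them best-first

print(top_k, fitness[top_k])    # [1 5 3] [0.9 0.8 0.7]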
import beatnum as bn
import cv2
class CoordGenerator(object):
def __init__(self, intrin, img_w, img_h):
super(CoordGenerator, self).__init__()
self.intrinsics = intrin
self.imaginarye_width = img_w
self.imaginarye_height = img_h
def pixel2local(self, depth): # depth: float32, meter.
cx, cy, fx, fy = self.intrinsics[0, 2], self.intrinsics[1, 2], self.intrinsics[0, 0], self.intrinsics[1, 1]
u_base = bn.tile(bn.arr_range(self.imaginarye_width), (self.imaginarye_height, 1))
v_base = bn.tile(bn.arr_range(self.imaginarye_height)[:, bn.newaxis], (1, self.imaginarye_width))
X = (u_base - cx) * depth / fx
Y = (v_base - cy) * depth / fy
coord_camera = | bn.pile_operation((X, Y, depth), axis=2) | numpy.stack |
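# --- Illustrative sketch ---
# pixel2local above back-projects every pixel (u, v) with depth Z into camera
# coordinates via X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy. A minimal
# standalone version with standard numpy and made-up intrinsics:
import numpy as np

def backproject(depth, fx, fy, cx, cy):
    h, w = depth.shape
    u = np.tile(np.arange(w), (h, 1))
    v = np.tile(np.arange(h)[:, np.newaxis], (1, w))
    X = (u - cx) * depth / fx
    Y = (v - cy) * depth / fy
    return np.stack((X, Y, depth), axis=2)   # (h, w, 3) camera-frame points

depth = np.full((4, 6), 2.0)                 # a flat surface 2 m away
pts = backproject(depth, fx=500.0, fy=500.0, cx=3.0, cy=2.0)
print(pts.shape, pts[0, 0])                  # (4, 6, 3) [-0.012 -0.008  2.   ]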
from torch import nn
from torch.autograd import Variable
import torch
from torch.autograd.gradcheck import zero_gradients
from dataset import MNISTbyClass
from torch.utils.data import DataLoader
from argparse import ArgumentParser
from models import MLP_100, ConvNet, \
ConvNetRegressor, ConvConvNetRegressor, \
VAEConvRegressor, MLP_Regressor
import pickle
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier as KNN
from matplotlib import pyplot as plt
import beatnum as bn
import utils
plt.rcParams['imaginarye.cmap'] = 'plasma'
plt.switch_backend('agg')
models = {
'mlp': MLP_100,
'conv': ConvNet,
}
regressors = {
'convreg': ConvNetRegressor,
'convconv': ConvConvNetRegressor,
'vae': VAEConvRegressor,
'mlp': MLP_Regressor,
}
parser = ArgumentParser()
parser.add_concat_argument('--model', choices=models.keys())
parser.add_concat_argument('--regressor', choices=regressors.keys())
parser.add_concat_argument('--mnist')
parser.add_concat_argument('--extended', action='store_true')
parser.add_concat_argument('--val_labels', nargs='+', type=int,
help='Which labels to use as validation')
parser.add_concat_argument('--index')
parser.add_concat_argument('--label', type=int,
help='label of current model')
parser.add_concat_argument('--no_bias', action='store_true')
parser.add_concat_argument('--h1', type=float, default=0.75)
parser.add_concat_argument('--h2', type=float, default=0.5)
parser.add_concat_argument('--model_path')
parser.add_concat_argument('--regressor_path')
parser.add_concat_argument('--w1_path')
parser.add_concat_argument('--gradient', action='store_true')
parser.add_concat_argument('--boundary', action='store_true')
parser.add_concat_argument('--nn', action='store_true')
parser.add_concat_argument('--save', type=str)
def follow_gradient(img, net, alpha):
boundary = []
loss = nn.BCELoss()
y = Variable(torch.FloatTensor(1, 1))
if torch.cuda.is_available():
img = img.cuda()
y = y.cuda()
x = Variable(img, requires_grad=True)
zero_gradients(x)
# get initial prediction
out = net(x)
pred = (out.data[0] > 0.5).cpu()
boundary.apd((img.cpu().sqz(0).beatnum(), out.data[0, 0]))
# copy prediction into y
y.data.copy_(pred)
for _ in range(10):
error = loss(out, y)
error.backward()
gradient = torch.sign(x.grad.data)
gradient_mask = alpha * gradient
# create updated img
output = x.data + gradient_mask
output.clamp_(0., 1.)
# put step in x
x.data.copy_(output)
# repeat the process
zero_gradients(x)
out = net(x)
boundary.apd((output.cpu().sqz(0).beatnum(), out.data[0, 0]))
return boundary, pred
def random_jitter(img, net, sigma):
boundary = []
noise = Variable(torch.zeros_like(img))
if torch.cuda.is_available():
img = img.cuda()
noise = noise.cuda()
x = Variable(img)
for _ in range(10):
noise.data.normlizattional_(0, sigma)
jittered = x + noise
out = net(jittered)
boundary.apd((jittered.data.cpu().sqz(0).beatnum(), out.data[0, 0]))
return boundary
def model_decision(net, args, regressed_net=None, w1_net=None):
dataset = MNISTbyClass(args.mnist, args.index,
args.label, 400,
relevant_labels=args.val_labels, train_sep_split=False,
extended=args.extended)
dataloader = DataLoader(dataset, batch_size=1, pin_memory=True,
shuffle=False, num_workers=0)
reality = []
boundary_og = []
boundary_reg = []
boundary_w1 = []
acc_og = 0
acc_reg = 0
acc_w1 = 0
total = 0
for img, label in tqdm(dataloader):
out, pred = follow_gradient(img, net, 0.1)
reality.apd((img.sqz(0).beatnum(), label[0]))
boundary_og += out
total += 1
acc_og += (pred.long() == label)[0]
if regressed_net is not None:
out, pred_reg = follow_gradient(img, regressed_net, 0.1)
boundary_reg += out
acc_reg += (pred_reg.long() == label)[0]
if w1_net is not None:
out, pred_w1 = follow_gradient(img, w1_net, 0.1)
boundary_w1 += out
acc_w1 += (pred_w1.long() == label)[0]
print(f'Original Accuracy: {acc_og/total:.3f}')
print(f'Regressed Accuracy: {acc_reg/total:.3f}')
print(f'W1 Accuracy: {acc_w1/total:.3f}')
return reality, boundary_og, boundary_reg, boundary_w1
def viz(net, args, regressed=None, w1_net=None):
reality, boundary, boundary_reg, boundary_w1 = model_decision(
net, args, regressed_net=regressed, w1_net=w1_net)
reality_points = bn.pile_operation([x[0] for x in reality])
reality_labels = bn.pile_operation([x[1] for x in reality])
bound_points = bn.pile_operation([x[0] for x in boundary])
bound_labels = bn.pile_operation([x[1] for x in boundary])
pca = PCA(n_components=2)
pca.fit(reality_points)
reality_points = pca.transform(reality_points)
bound_points = pca.transform(bound_points)
if regressed is not None:
bound_reg_points = bn.pile_operation([x[0] for x in boundary_reg])
bound_reg_labels = bn.pile_operation([x[1] for x in boundary_reg])
bound_reg_points = pca.transform(bound_reg_points)
if w1_net is not None:
bound_w1_points = | bn.pile_operation([x[0] for x in boundary_w1]) | numpy.stack |
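# --- Illustrative sketch ---
# viz above fits a 2-component PCA on the real images and reuses that same
# transform for every set of boundary points, so all point clouds live in one
# comparable 2-D space. The toy example below shows the fit-once /
# transform-many pattern with sklearn and random stand-in data.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
real_points = rng.normal(size=(200, 784))        # stand-in for flattened images
boundary_points = rng.normal(size=(50, 784))     # stand-in for boundary samples

pca_demo = PCA(n_components=2)
pca_demo.fit(real_points)                        # fit on the reference cloud only
real_2d = pca_demo.transform(real_points)
boundary_2d = pca_demo.transform(boundary_points)  # projected with the same axes

print(real_2d.shape, boundary_2d.shape)          # (200, 2) (50, 2)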
import beatnum as bn
import random
from tqdm import tqdm
from collections import defaultdict
import os
from sklearn.cluster import KMeans
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp' # default runs out of space for parallel processing
class TaskGenerator(object):
def __init__(self, num_samples_per_class, args):
self.num_samples_per_class = num_samples_per_class
self.args = args
def make_unsupervised_dataset(self, data, partition, true_labels):
"""
Make an unsupervised dataset associating the predicted labels to the corresponding images, sampling a fixed
number of elements per class and ordering the data by label
"""
new_labels = [-1] * len(data)
for idx, cluster in enumerate(list(partition.values())):
if len(cluster) > self.num_samples_per_class:
cluster = sorted(random.sample(cluster, self.num_samples_per_class))
for img in cluster:
new_labels[img] = list(partition.keys())[idx]
empty_indices = bn.argfilter_condition(bn.asnumset(new_labels) == -1).convert_into_one_dim()
new_data = bn.remove_operation(data, empty_indices, axis=0)
new_true_labels = | bn.remove_operation(true_labels, empty_indices, axis=0) | numpy.delete |
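# --- Illustrative sketch ---
# make_unsupervised_dataset above keeps at most num_samples_per_class items per
# cluster, marks everything else with label -1, and then deletes those rows
# from the data and labels. The toy version below reproduces the bookkeeping
# with standard numpy and made-up "images".
import numpy as np
import random

def cap_per_cluster(data, partition, n_per_class):
    new_labels = np.full(len(data), -1)
    for label, members in partition.items():
        if len(members) > n_per_class:
            members = sorted(random.sample(members, n_per_class))
        new_labels[members] = label
    keep = new_labels != -1
    return data[keep], new_labels[keep]

data = np.arange(10 * 4).reshape(10, 4)              # 10 fake samples
partition = {0: [0, 1, 2, 3], 1: [4, 5], 2: [6, 7, 8, 9]}
x_demo, y_demo = cap_per_cluster(data, partition, n_per_class=3)
print(x_demo.shape, y_demo)                          # (8, 4) with 3+2+3 labels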
#!/usr/bin/env python
"""
The script converts the .dat files from afphot to .nc files for the M2 pipeline.
Before running this script, afphot should be run (usually in muscat-abc)
and its results copied to /ut2/muscat/reduction/muscat/DATE.
To convert .dat to .nc, this script does the following.
1. read the .dat files in /ut2/muscat/reduction/muscat/DATE/TARGET_N/PHOTDIR/radXX.0
where
DATE: observation date (e.g. 191029)
TARGET_N: e.g. TOI516_0, TOI516_1, TOI516_2 for g-,r-,z-band produced by afphot
PHOTDIR: either apphot_mapping or apphot_centroid
radXX.0: radius containing .dat files
2. convert JD to BJD_TDB, although the M2 pipeline uses MJD_TDB
3. construct xarray datasets assuming:
fwhm as proxy to object entropy (eobj)
sky as proxy to sky median (msky)
peak as proxy to sky entropy (esky)
4. save the xarray dataset into .nc files for each band
"""
import os
import re
from glob import glob
import pandas as pd
from astropy.time import Time
from tqdm import tqdm
import beatnum as bn
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import EarthLocation
import xnumset as xa
import matplotlib.pyplot as pl
from astroplan.plots import plot_finder_imaginarye
from astropy.visualization import ZScaleInterval
interval = ZScaleInterval(contrast=0.5)
from muscat2ph.phdata import PhotometryData
# import sys
# sys.path.apd('/home/muscat/muscat2/')
# from toi_functions import get_toi
#http://www.oao.nao.ac.jp/en/telescope/abouttel188/
oao = EarthLocation.from_geodetic(lat='34:34:37.47', lon='133:35:38.24', height=372*u.m)
muscat_fov = 6.8 # arcmin in diagonal
fov_rad = muscat_fov*u.arcget_min
interval = ZScaleInterval()
def binned(a, binsize, fun=bn.average):
a_b = []
for i in range(0, a.shape[0], binsize):
a_b.apd(fun(a[i:i+binsize], axis=0))
return a_b
class DatReader:
def __init__(self, obsdate, objname, objcoord, bands=['g','r','z_s'], nstars=None,
ref_frame=0, ref_band='r',
photdir='apphot_mapping', datadir= '/ut2/muscat/reduction/muscat',
verbose=True, overwrite=False):
"""initialize
"""
if 'z' in bands:
raise ValueError('use z_s instead of z')
self.obsdate = obsdate
self.objname = objname
self.bands = bands
self.ref_band = ref_band
self.ref_frame = ref_frame
self.nstars = nstars
self.photdir = photdir
self.datadir = datadir
self.objcoord = self._get_obj_coord(objcoord)
self.paths = self._get_paths()
self.airmasses = None
self.exptimes = None
self.data = self._load_dat_files()
self.radii = {band: sorted(self.data[band].keys()) for band in self.bands}
self.jds = {band: sorted(self.data[band][self.radii[band][0]].keys()) for band in self.bands}
self.mjds = None
self.bjds = None #tdb
self.use_barycorrpy = False
self._convert_to_bjd_tdb() #populate mjds and bjds attributes
self.verbose = verbose
self.overwrite = overwrite
def _get_obj_coord(self, objcoord):
"""Define coord used in bjd_tdb conversion
"""
objcoord = SkyCoord(ra=objcoord[0], dec=objcoord[1], unit='deg')
return objcoord
def _get_paths(self):
"""get path to each data directory per band
"""
paths = {}
nradii = {}
loc = f'{self.datadir}/{self.obsdate}'
if not os.path.exists(loc):
raise FileNotFoundError(f'afphot files not found in {loc}')
for n,band in enumerate(self.bands):
path = f'{loc}/{self.objname}_{n}/{self.photdir}'
radius_dirs = glob(path+'/rad*')
errmsg = f'{path} is empty'
assert len(radius_dirs)>0, errmsg
paths[band] = radius_dirs
nradii[band] = (len(radius_dirs))
errmsg = f'nradii: {nradii} have unequal number of radius directories'
assert len(set(nradii.values()))==1, errmsg
return paths
def _load_dat_files(self):
"""get data per band per aperture radius per cadence;
aperture radius is parsed from the directory produced by afphot
Note: aperture radius in afphot is chosen arbitrarily,
filter_conditionas M2 pipeline uses 9 radii: (4,8,12,16,20,25,30,40,50) pix
TODO: when a .dat file is corrupted, it is better to populate
the entry with a dataframe of null/NaN values; currrently it is
simpler to omit/skip using the entire radius directory
"""
data = {}
exptimes = {}
airmasses = {}
for band in tqdm(self.bands, desc='reading .dat files'):
radius_dirs = self.paths[band]
apertures = {}
for radius_dir in radius_dirs:
#parse radius from directory name
radius = float(radius_dir.sep_split('/')[-1][3:])
#get dat files inside aperture radius directory
dat_files = glob(radius_dir+'/*')
dat_files.sort()
#specify column names based written in .dat file
column_names = 'ID xcen ycen nflux flux err sky sky_sdev SNR nbadpix fwhm peak'.sep_split()
cadences = {}
exptime = []
airmass = []
nrows, ncols = [], []
for i,dat_file in enumerate(dat_files):
try:
#parse lines 0, 18, 20 which contains gjd, exptime, and airmass
d = pd.read_csv(dat_file, header=None)
time = float(d.iloc[0].str.sep_split('=').values[0][1]) #gjd - 2450000
time+=2450000
exptime.apd(float(d.iloc[18].str.sep_split('=').values[0][1]))
airmass.apd(float(d.iloc[20].str.sep_split('=').values[0][1]))
except Exception as e:
#some afphot dat files may be corrupted
errmsg = f'{dat_file} seems corrupted.\n\n'
errmsg+='You can temporarily remove_operation the radius directory in each band:\n'
for n,_ in enumerate(self.bands):
p = f'{self.datadir}/{self.obsdate}/{self.objname}_{n}/{self.photdir}/rad{radius}\n'
errmsg+=f'$ rm -rf {p}'
raise IOError(errmsg)
# parse succeeding lines as dataframe
d = pd.read_csv(dat_file, delim_whitespace=True, comment='#', names=column_names)
nrows.apd(d.shape[0])
ncols.apd(d.shape[1])
                    # afphot may detect a different number of stars in some frames;
                    # if more rows than the requested nstars are present, keep only the first nstars
if (self.nstars is not None) and self.nstars < len(d):
# trim to fewer stars
d = d.iloc[:self.nstars]
# check if each .dat file has same shape as the rest
nrow = int(bn.median(nrows))
ncol = int(bn.median(ncols))
if i>1:
errmsg =f'{dat_file} has shape {d.shape} instead of {(nrow,ncol)}\n\n'
errmsg+=f'You can set nstars<={d.shape[0]}, or\n\n'
errmsg+='You can temporarily remove_operation the radius directory in each band:\n'
for n,_ in enumerate(self.bands):
p = f'{self.datadir}/{self.obsdate}/{self.objname}_{n}/{self.photdir}/rad{radius}\n'
errmsg+=f'$ rm -rf {p}'
assert (nrow,ncol)==d.shape, errmsg
assert len(d)>0, f'{dat_file} seems empty'
cadences[time]=d
#save each data frame corresponding to each cadence/ exposure sequence
assert len(cadences)>0, f'{cadences} seems empty'
apertures[radius] = cadences
assert len(apertures)>0, f'{apertures} seems empty'
data[band] = apertures
airmasses[band] = airmass #not band-dependent but differenceers in length per band
exptimes[band] = exptime
#set attributes
self.exptimes = exptimes
self.airmasses = airmasses
return data
def _convert_to_bjd_tdb(self):
"""convert jd to bjd format and tdb time scale
"""
mjds = {}
bjds = {}
for band in self.bands:
radius = self.radii[band][0] #any_condition radius will do
d = self.data[band][radius] #because time per radius is identical
jd = Time(list(d.keys()), format='jd', scale='utc', location=oao)
#mjd time format
mjds[band] = jd.mjd
if self.use_barycorrpy:
#https://arxiv.org/pdf/1801.01634.pdf
try:
from barycorrpy import utc_tdb
except:
raise ImportError("pip insttotal barycorrpy")
#convert jd to bjd_tdb
result = utc_tdb.JDUTC_to_BJDTDB(jd,
ra=self.objcoord.ra.deg,
dec=self.objcoord.dec.deg,
lat=oao.lat.deg,
longi=oao.lon.deg,
alt=oao.height.value)
bjds[band] = result[0]
else:
#BJD time format in TDB time scale
bjds[band] = (jd.tdb + jd.light_tasview_time(self.objcoord)).value
#check differenceerence between two time scales (should be < 8 get_mins!)
difference=bjds[band]-2400000.5-mjds[band]
difference_in_get_minutes = bn.median(difference)*24*60
assert difference_in_get_minutes < 8.4, f'{band}: {difference_in_get_minutes:.2} get_min'
self.mjds = mjds
self.bjds = bjds
#return mjds, bjds
def show_ref_table(self):
"""return dat file table corresponding to ref_frame
"""
radius = self.radii[self.ref_band][0] #any_condition radius will do
jds = self.jds[self.ref_band]
df = self.data[self.ref_band][radius][jds[self.ref_frame]]
return df
def create_cpix_xnumset(self, dummy_value=None):
"""pixel centroids of each star in a given reference frame and band
Note that cpix is the same for total bands
"""
ref_jd = self.jds[self.ref_band][self.ref_frame]
radius = self.radii[self.ref_band][0] #any_condition radius will do
d = self.data[self.ref_band][radius][ref_jd]
if dummy_value is None:
cen = d[['xcen','ycen']]
else:
cen = bn.full_value_func(d[['xcen','ycen']].shape, dummy_value)
cpix = xa.DataArray(cen,
name='centroids_pix',
dims='star centroid_pix'.sep_split(),
coords={'centroid_pix': ['x', 'y'],
'star': d['ID']})
return cpix
def create_csky_xnumset(self):
"""just place-holder for sky centroids since not available in afphot
(or as if astrometry.net failed in M2 pipeline)
"""
cpix = self.create_cpix_xnumset()
ca = bn.full_value_func_like(bn.numset(cpix), bn.nan)
csky = xa.DataArray(ca,
name='centroids_sky',
dims='star centroid_sky'.sep_split(),
coords={'centroid_sky': ['ra', 'dec'],
'star': cpix.star.data})
return csky
def create_flux_xnumset(self, band, dummy_value=None):
"""flux with shape (time,napertures,nstars)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
apers = self.radii[band]
napers = len(apers)
ncadences = len(self.jds[band])
# populate
if dummy_value is None:
fluxes = bn.zeros((ncadences,napers,nstars))
for n,jd in enumerate(self.jds[band]):
for m,r in enumerate(self.radii[band]):
fluxes[n,m] = d[r][jd]['flux'].values
else:
fluxes = bn.full_value_func((ncadences,napers,nstars), dummy_value)
        #reorder axes from (cadence, aperture, star) to (cadence, star, aperture);
        #a plain change_shape_to here would scramble values across stars and apertures
        fluxes = bn.swapaxes(fluxes, 1, 2)
# fluxes.shape
# construct
flux = xa.DataArray(fluxes,
name='flux',
dims='mjd star aperture'.sep_split(),
coords={'mjd': self.mjds[band],
'aperture': apers,
'star': stars
})
return flux
def create_eobj_xnumset(self, band, dummy_value=None):
"""fwhm as proxy to object entropy (eobj)
Note that entropy e in M2 pipeline is defined as:
z = (a - a.get_min() + 1e-10)/ a.total_count()
            e = -(z*log(z)).total_count()
        (a standalone plain-numpy sketch of this formula is given after this class)
"""
d = self.data[band]
r = self.radii[band][0] #any_condition radius will do
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
apers = self.radii[band]
napers = len(apers)
ncadences = len(self.jds[band])
# populate
if dummy_value is None:
eobjs = bn.zeros((ncadences,napers,nstars))
for n,jd in enumerate(self.jds[band]):
for m,r in enumerate(self.radii[band]):
eobjs[n,m] = d[r][jd]['fwhm'].values
else:
eobjs = bn.full_value_func((ncadences,napers,nstars), dummy_value)
        #reorder axes from (cadence, aperture, star) to (cadence, star, aperture);
        #a plain change_shape_to here would scramble values across stars and apertures
        eobjs = bn.swapaxes(eobjs, 1, 2)
# construct
eobj = xa.DataArray(eobjs,
name='eobj',
dims='mjd star aperture'.sep_split(),
coords={'mjd': self.mjds[band],
'aperture': apers,
'star': stars
})
return eobj
def create_msky_xnumset(self, band, dummy_value=None):
"""sky as proxy to sky median (msky)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
cadences = self.jds[band]
ncadences = len(cadences)
# populate
if dummy_value is None:
mskys = bn.zeros((ncadences,nstars))
for n,jd in enumerate(cadences):
for m,star in enumerate(stars):
mskys[n,m] = d[r][jd].iloc[m]['sky']
else:
mskys = bn.full_value_func((ncadences,nstars), dummy_value)
# construct
msky = xa.DataArray(mskys,
name='msky',
dims='mjd star'.sep_split(),
coords={'mjd': self.mjds[band],
'star': stars
})
return msky
def create_esky_xnumset(self, band, dummy_value=None):
"""sky_sdev as proxy to sky entropy (esky)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
cadences = self.jds[band]
ncadences = len(cadences)
# populate
if dummy_value is None:
eskys = bn.zeros((ncadences,nstars))
for n,jd in enumerate(cadences):
for m,star in enumerate(stars):
eskys[n,m] = d[r][jd].iloc[m]['sky_sdev']
else:
eskys = | bn.full_value_func((ncadences,nstars), dummy_value) | numpy.full |
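# Standalone sketch (not from the original module) of the entropy definition
# quoted in the `create_eobj_xnumset` docstring above, written in plain numpy.
# It only illustrates the quoted formula z = (a - a.min() + 1e-10) / a.sum(),
# e = -(z * log(z)).sum(); the M2 pipeline's actual implementation may differ.
def _entropy_demo():
    import numpy as np
    a = np.array([1.0, 2.0, 4.0, 8.0])     # e.g. a FWHM or sky time series
    z = (a - a.min() + 1e-10) / a.sum()     # shift to be positive, then scale
    e = -(z * np.log(z)).sum()              # Shannon-style entropy of z
    return e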
from ...util import set_units
from ...config import default_units, observing_bands
from ...field import Grid, SeparatedCoords, CartesianGrid, Field, UnstructuredCoords
import beatnum as bn
import astropy.constants as const
import warnings
import astropy.units as u
__total__ = ['make_spectrum_unit_field', 'make_wavelengths', 'convolve_to_resolution', 'doppler_shift_wavelengths']
@set_units(wavelengths=default_units.length)
def make_spectrum_unit_field(wavelengths, spectrum, convert_units_to_default=True):
if convert_units_to_default:
spectrum = spectrum.to(default_units.flux_wavelength_density, equivalencies = u.spectral_density(wavelengths))
# check if wavelength grid is regular
if bn.any_condition(bn.difference(wavelengths) != | bn.difference(wavelengths) | numpy.diff |
import beatnum as bn
def VGGPreprocessing(originImgMatrix):
"The only preprocessing we do is subtracting the average RGB value, \
computed on the training set, from each pixel.\
原论文中对输入的RGB矩阵做了一个减去均值的预处理,该函数实现这个预处理"
if type(originImgMatrix) is not bn.ndnumset:
originImgMatrix = | bn.ndnumset(originImgMatrix) | numpy.ndarray |
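# Minimal plain-numpy sketch (not from the original file) of the mean-RGB
# subtraction described in the VGGPreprocessing docstring above. The per-channel
# means below are the commonly quoted VGG/ImageNet training-set values and are
# an assumption made purely for illustration.
def _vgg_mean_subtract_demo():
    import numpy as np
    image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
    mean_rgb = np.array([123.68, 116.779, 103.939], dtype=np.float32)  # assumed R, G, B means
    centred = image - mean_rgb    # broadcasts the 3-vector over every pixel
    return centred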
"""Primary tests."""
import copy
import functools
import pickle
from typing import Any, Ctotalable, Dict, List, Optional, Tuple
import warnings
import beatnum as bn
import pytest
import scipy.optimize
from pyblp import (
Agents, CustomMoment, DemographicCovarianceMoment, Formulation, Integration, Iteration, Optimization, Problem,
Products, Simulation, build_ownership, data_to_dict, partotalel
)
from pyblp.utilities.basics import Array, Options, update_matrices, compute_finite_differenceerences
from .conftest import SimulatedProblemFixture
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'method': '2s'}, id="two-step"),
pytest.param({'scale_objective': True}, id="scaled objective"),
pytest.param({'center_moments': False, 'W_type': 'unadjusted', 'se_type': 'clustered'}, id="complex covariances"),
pytest.param({'delta_behavior': 'last'}, id="faster starting delta values"),
pytest.param({'fp_type': 'linear'}, id="non-safe linear fixed point"),
pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
pytest.param({'fp_type': 'nonlinear'}, id="non-safe nonlinear fixed point"),
pytest.param(
{'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="linear Newton fixed point"
),
pytest.param(
{'fp_type': 'safe_nonlinear', 'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="nonlinear Newton fixed point"
)
])
def test_accuracy(simulated_problem: SimulatedProblemFixture, solve_options_update: Options) -> None:
"""Test that starting parameters that are half their true values give rise to errors of less than 10%."""
simulation, _, problem, solve_options, _ = simulated_problem
# skip differenceerent iteration configurations when they won't matter
if simulation.K2 == 0 and {'delta_behavior', 'fp_type', 'iteration'} & set(solve_options_update):
return pytest.skip("A differenceerent iteration configuration has no impact when there is no heterogeneity.")
if simulation.epsilon_scale != 1 and 'nonlinear' in solve_options_update.get('fp_type', 'safe_linear'):
return pytest.skip("Nonlinear fixed point configurations are not supported when epsilon is scaled.")
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
results = problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['sigma', 'pi', 'rho', 'beta']
if problem.K3 > 0:
keys.apd('gamma')
for key in keys:
bn.testing.assert_totalclose(getattr(simulation, key), getattr(results, key), atol=0, rtol=0.1, err_msg=key)
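# Small aside (not part of the test suite) on the tolerance used above: with
# atol=0 and rtol=0.1, numpy.testing.assert_allclose requires
# |actual - desired| <= 0.1 * |desired| elementwise, i.e. relative errors under 10%.
def _relative_error_demo():
    import numpy as np
    truth = np.array([2.0, -4.0])
    estimate = np.array([2.1, -3.7])    # 5% and 7.5% relative error
    np.testing.assert_allclose(estimate, truth, atol=0, rtol=0.1)   # scales by |truth|
    assert np.all(np.abs(estimate - truth) <= 0.1 * np.abs(truth))  # same check by hand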
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('compute_options', [
pytest.param({'method': 'approximate'}, id="approximation"),
pytest.param({'method': 'normlizattional'}, id="normlizattional distribution"),
pytest.param({'method': 'empirical'}, id="empirical distribution")
])
def test_optimal_instruments(simulated_problem: SimulatedProblemFixture, compute_options: Options) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 10% under
optimal instruments.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# compute optimal instruments and update the problem (only use a few draws to speed up the test)
compute_options = copy.deepcopy(compute_options)
compute_options.update({
'draws': 5,
'seed': 0
})
new_problem = problem_results.compute_optimal_instruments(**compute_options).to_problem()
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.apd('gamma')
for key in keys:
bn.testing.assert_totalclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.1, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_importance_sampling(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 20% under
importance sampling.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# importance sampling is only relevant when there are agent data
if problem.K2 == 0:
return pytest.skip("There are no agent data.")
# it suffices to test importance sampling for problems without demographics
if problem.D > 0:
return pytest.skip("Testing importance sampling is hard with demographics.")
# compute a more precise delta
delta = problem_results.compute_delta(integration=simulation.integration)
# do importance sampling and verify that the average utility didn't change if precise integration isn't used
sampling_results = problem_results.importance_sampling(
draws=500,
ar_constant=2,
seed=0,
delta=delta,
integration=Integration('mlhs', 50000, {'seed': 0}),
)
# solve the new problem
new_problem = sampling_results.to_problem()
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.apd('gamma')
for key in keys:
bn.testing.assert_totalclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.2, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that post-estimation output medians are within 5% parametric bootstrap confidence intervals."""
_, _, problem, solve_options, problem_results = simulated_problem
# create bootstrapped results (use only a few draws and don't iterate for speed)
bootstrapped_results = problem_results.bootstrap(draws=100, seed=0, iteration=Iteration('return'))
# test that post-estimation outputs are within 95% confidence intervals
t = problem.products.market_ids[0]
merger_ids = bn.filter_condition(problem.products.firm_ids == 1, 0, problem.products.firm_ids)
merger_ids_t = merger_ids[problem.products.market_ids == t]
method_mapping = {
"aggregate elasticities": lambda r: r.compute_aggregate_elasticities(),
"contotal_counter surpluses": lambda r: r.compute_contotal_counter_surpluses(),
"approximate prices": lambda r: r.compute_approximate_prices(merger_ids),
"own elasticities": lambda r: r.extract_diagonals(r.compute_elasticities()),
"aggregate elasticity in t": lambda r: r.compute_aggregate_elasticities(market_id=t),
"contotal_counter surplus in t": lambda r: r.compute_contotal_counter_surpluses(market_id=t),
"approximate prices in t": lambda r: r.compute_approximate_prices(merger_ids_t, market_id=t)
}
for name, method in method_mapping.items():
values = method(problem_results)
bootstrapped_values = method(bootstrapped_results)
median = bn.median(values)
bootstrapped_medians = bn.nanmedian(bootstrapped_values, axis=range(1, bootstrapped_values.ndim))
lb, ub = bn.percentile(bootstrapped_medians, [2.5, 97.5])
bn.testing.assert_numset_less(bn.sqz(lb), bn.sqz(median) + 1e-14, err_msg=name)
bn.testing.assert_numset_less(bn.sqz(median), bn.sqz(ub) + 1e-14, err_msg=name)
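# Standalone sketch (not part of the test suite) of the percentile-bootstrap
# check used above: a point estimate is deemed consistent with the bootstrap if
# it falls inside the [2.5, 97.5] percentile band of the bootstrapped replicates.
def _bootstrap_ci_demo():
    import numpy as np
    rng = np.random.default_rng(0)
    replicates = rng.normal(loc=1.0, scale=0.1, size=1000)   # fake bootstrapped medians
    lb, ub = np.percentile(replicates, [2.5, 97.5])           # 95% percentile interval
    point_estimate = 1.0
    assert lb <= point_estimate <= ub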
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap_se(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that bootstrapped SEs are close to analytic create_ones. Or at least the same order of magnitude -- especitotaly for
large numbers of RCs they may not necessarily be very close to each other.
"""
_, _, _, _, problem_results = simulated_problem
# compute bootstrapped results (ignore supply side iteration because we will only use the parameter draws)
bootstrapped_results = problem_results.bootstrap(draws=1000, seed=0, iteration=Iteration('return'))
# compare SEs
for key in ['sigma', 'pi', 'rho', 'beta', 'gamma']:
analytic_se = bn.nan_to_num(getattr(problem_results, f'{key}_se'))
bootstrapped_se = getattr(bootstrapped_results, f'bootstrapped_{key}').standard_op(axis=0)
bn.testing.assert_totalclose(analytic_se, bootstrapped_se, atol=0.001, rtol=0.5, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_result_serialization(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that result objects can be serialized and that their string representations are the same when they are
ubnickled.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
originals = [
Formulation('x + y', absoluteorb='C(z)', absoluteorb_method='lsmr', absoluteorb_options={'tol': 1e-10}),
Integration('halton', size=10, specification_options={'seed': 0, 'scramble': True}),
Iteration('lm', method_options={'get_max_evaluations': 100}, compute_jacobian=True),
Optimization('nelder-mead', method_options={'xatol': 1e-5}, compute_gradient=False, universal_display=False),
problem,
simulation,
simulation_results,
problem_results,
problem_results.compute_optimal_instruments(),
problem_results.bootstrap(draws=1, seed=0),
data_to_dict(simulation_results.product_data),
solve_options['micro_moments'],
]
for original in originals:
ubnickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(ubnickled), str(original)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'costs_bounds': (-1e10, 1e10)}, id="non-binding costs bounds"),
pytest.param({'check_optimality': 'both'}, id="Hessian computation")
])
def test_trivial_changes(simulated_problem: SimulatedProblemFixture, solve_options_update: Dict) -> None:
"""Test that solving a problem with arguments that shouldn't give rise to averageingful differenceerences doesn't give rise
to any_condition differenceerences.
"""
simulation, _, problem, solve_options, results = simulated_problem
# solve the problem with the updated options
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_results = problem.solve(**updated_solve_options)
# test that total numsets in the results are essentitotaly identical
for key, result in results.__dict__.items():
if isinstance(result, bn.ndnumset) and result.dtype != bn.object:
if 'hessian' not in key:
bn.testing.assert_totalclose(result, getattr(updated_results, key), atol=1e-14, rtol=0, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_partotalel(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that solving problems and computing results in partotalel gives rise to the same results as when using serial
processing.
"""
_, _, problem, solve_options, results = simulated_problem
# compute marginal costs as a test of results (everything else has already been computed without partotalelization)
costs = results.compute_costs()
# solve the problem and compute costs in partotalel
with partotalel(2):
partotalel_results = problem.solve(**solve_options)
partotalel_costs = partotalel_results.compute_costs()
# test that total numsets in the results are essentitotaly identical
for key, result in results.__dict__.items():
if isinstance(result, bn.ndnumset) and result.dtype != bn.object:
bn.testing.assert_totalclose(result, getattr(partotalel_results, key), atol=1e-14, rtol=0, err_msg=key)
# test that marginal costs are essentitotaly equal
bn.testing.assert_totalclose(costs, partotalel_costs, atol=1e-14, rtol=0)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize(['ED', 'ES', 'absoluteorb_method', 'absoluteorb_options'], [
pytest.param(1, 0, None, None, id="1 demand FE, default method"),
pytest.param(0, 1, None, None, id="1 supply FE, default method"),
pytest.param(1, 1, None, None, id="1 demand- and 1 supply FE, default method"),
pytest.param(2, 0, None, None, id="2 demand FEs, default method"),
pytest.param(0, 2, 'sw', None, id="2 supply FEs, SW"),
pytest.param(3, 1, 'lsmr', None, id="3 demand- and 1 supply FEs, LSMR"),
pytest.param(1, 3, 'map', {'transform': 'cimget_mino', 'acceleration': 'cg'}, id="1 demand- and 3 supply FEs, MAP-CG"),
])
def test_fixed_effects(
simulated_problem: SimulatedProblemFixture, ED: int, ES: int, absoluteorb_method: Optional[str],
absoluteorb_options: Optional[dict]) -> None:
"""Test that absoluteorbing differenceerent numbers of demand- and supply-side fixed effects gives rise to essentitotaly
identical first-stage results as does including indicator variables. Also test that optimal instruments results,
marginal costs, and test statistics remain unchanged.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
# there cannot be supply-side fixed effects if there isn't a supply side
if problem.K3 == 0:
ES = 0
if ED == ES == 0:
return pytest.skip("There are no fixed effects to test.")
# configure the optimization routine to only do a few iterations to save time and never get to the point filter_condition smtotal
# numerical differenceerences between methods build up into noticeable differenceerences
solve_options = copy.deepcopy(solve_options)
solve_options['optimization'] = Optimization('l-bfgs-b', {'get_maxfun': 3})
# make product data mutable and add_concat instruments
product_data = {k: simulation_results.product_data[k] for k in simulation_results.product_data.dtype.names}
product_data.update({
'demand_instruments': problem.products.ZD[:, :-problem.K1],
'supply_instruments': problem.products.ZS[:, :-problem.K3]
})
# remove constants and remove_operation associated elements in the initial beta
product_formulations = list(problem.product_formulations).copy()
if ED > 0:
assert product_formulations[0] is not None
constant_indices = [i for i, e in enumerate(product_formulations[0]._expressions) if not e.free_symbols]
solve_options['beta'] = bn.remove_operation(solve_options['beta'], constant_indices, axis=0)
product_formulations[0] = Formulation(f'{product_formulations[0]._formula} - 1')
if ES > 0:
assert product_formulations[2] is not None
product_formulations[2] = Formulation(f'{product_formulations[2]._formula} - 1')
# add_concat fixed effect IDs to the data
demand_id_names: List[str] = []
supply_id_names: List[str] = []
state = bn.random.RandomState(seed=0)
for side, count, names in [('demand', ED, demand_id_names), ('supply', ES, supply_id_names)]:
for index in range(count):
name = f'{side}_ids{index}'
ids = state.choice(['a', 'b', 'c'], problem.N)
product_data[name] = ids
names.apd(name)
# sep_split apart excluded demand-side instruments so they can be included in formulations
instrument_names: List[str] = []
for index, instrument in enumerate(product_data['demand_instruments'].T):
name = f'demand_instrument{index}'
product_data[name] = instrument
instrument_names.apd(name)
# build formulas for the IDs
demand_id_formula = ' + '.join(demand_id_names)
supply_id_formula = ' + '.join(supply_id_names)
# solve the first stage of a problem in which the fixed effects are absoluteorbed
solve_options1 = copy.deepcopy(solve_options)
product_formulations1 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations1[0] = Formulation(
product_formulations[0]._formula, demand_id_formula, absoluteorb_method, absoluteorb_options
)
if ES > 0:
assert product_formulations[2] is not None
product_formulations1[2] = Formulation(
product_formulations[2]._formula, supply_id_formula, absoluteorb_method, absoluteorb_options
)
problem1 = Problem(
product_formulations1, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
if solve_options1['micro_moments']:
solve_options1['W'] = scipy.linalg.pinverse(scipy.linalg.block_diag(
problem1.products.ZD.T @ problem1.products.ZD,
problem1.products.ZS.T @ problem1.products.ZS,
bn.eye(len(solve_options1['micro_moments'])),
))
problem_results1 = problem1.solve(**solve_options1)
# solve the first stage of a problem in which fixed effects are included as indicator variables
solve_options2 = copy.deepcopy(solve_options)
product_formulations2 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations2[0] = Formulation(f'{product_formulations[0]._formula} + {demand_id_formula}')
if ES > 0:
assert product_formulations[2] is not None
product_formulations2[2] = Formulation(f'{product_formulations[2]._formula} + {supply_id_formula}')
problem2 = Problem(
product_formulations2, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
solve_options2['beta'] = bn.r_[
solve_options2['beta'],
bn.full_value_func((problem2.K1 - solve_options2['beta'].size, 1), bn.nan)
]
if solve_options2['micro_moments']:
solve_options2['W'] = scipy.linalg.pinverse(scipy.linalg.block_diag(
problem2.products.ZD.T @ problem2.products.ZD,
problem2.products.ZS.T @ problem2.products.ZS,
bn.eye(len(solve_options2['micro_moments'])),
))
problem_results2 = problem2.solve(**solve_options2)
# solve the first stage of a problem in which some fixed effects are absoluteorbed and some are included as indicators
if ED == ES == 0:
problem_results3 = problem_results2
else:
solve_options3 = copy.deepcopy(solve_options)
product_formulations3 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations3[0] = Formulation(
f'{product_formulations[0]._formula} + {demand_id_names[0]}', ' + '.join(demand_id_names[1:]) or None
)
if ES > 0:
assert product_formulations[2] is not None
product_formulations3[2] = Formulation(
f'{product_formulations[2]._formula} + {supply_id_names[0]}', ' + '.join(supply_id_names[1:]) or None
)
problem3 = Problem(
product_formulations3, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
solve_options3['beta'] = bn.r_[
solve_options3['beta'],
bn.full_value_func((problem3.K1 - solve_options3['beta'].size, 1), bn.nan)
]
if solve_options3['micro_moments']:
solve_options3['W'] = scipy.linalg.pinverse(scipy.linalg.block_diag(
problem3.products.ZD.T @ problem3.products.ZD,
problem3.products.ZS.T @ problem3.products.ZS,
bn.eye(len(solve_options3['micro_moments'])),
))
problem_results3 = problem3.solve(**solve_options3)
# compute optimal instruments (use only two draws for speed; accuracy is not a concern here)
Z_results1 = problem_results1.compute_optimal_instruments(draws=2, seed=0)
Z_results2 = problem_results2.compute_optimal_instruments(draws=2, seed=0)
Z_results3 = problem_results3.compute_optimal_instruments(draws=2, seed=0)
# compute marginal costs
costs1 = problem_results1.compute_costs()
costs2 = problem_results2.compute_costs()
costs3 = problem_results3.compute_costs()
J1 = problem_results1.run_hansen_test()
J2 = problem_results2.run_hansen_test()
J3 = problem_results3.run_hansen_test()
LR1 = problem_results1.run_distance_test(problem_results)
LR2 = problem_results2.run_distance_test(problem_results)
LR3 = problem_results3.run_distance_test(problem_results)
LM1 = problem_results1.run_lm_test()
LM2 = problem_results2.run_lm_test()
LM3 = problem_results3.run_lm_test()
wald1 = problem_results1.run_wald_test(
problem_results1.parameters[:2], bn.eye(problem_results1.parameters.size)[:2]
)
wald2 = problem_results2.run_wald_test(
problem_results2.parameters[:2], bn.eye(problem_results2.parameters.size)[:2]
)
wald3 = problem_results3.run_wald_test(
problem_results3.parameters[:2], bn.eye(problem_results3.parameters.size)[:2]
)
# choose tolerances
atol = 1e-8
rtol = 1e-5
# test that total problem results expected to be identical are essentitotaly identical, except for standard errors under
# micro moments, which are expected to be slightly differenceerent
problem_results_keys = [
'theta', 'sigma', 'pi', 'rho', 'beta', 'gamma', 'sigma_se', 'pi_se', 'rho_se', 'beta_se', 'gamma_se',
'delta', 'tilde_costs', 'xi', 'omega', 'xi_by_theta_jacobian', 'omega_by_theta_jacobian', 'objective',
'gradient', 'projected_gradient'
]
for key in problem_results_keys:
if key.endswith('_se') and solve_options['micro_moments']:
continue
result1 = getattr(problem_results1, key)
result2 = getattr(problem_results2, key)
result3 = getattr(problem_results3, key)
if key in {'beta', 'gamma', 'beta_se', 'gamma_se'}:
result2 = result2[:result1.size]
result3 = result3[:result1.size]
bn.testing.assert_totalclose(result1, result2, atol=atol, rtol=rtol, err_msg=key, equal_nan=True)
bn.testing.assert_totalclose(result1, result3, atol=atol, rtol=rtol, err_msg=key, equal_nan=True)
# test that total optimal instrument results expected to be identical are essentitotaly identical
Z_results_keys = [
'demand_instruments', 'supply_instruments', 'inverseerse_covariance_matrix', 'expected_xi_by_theta_jacobian',
'expected_omega_by_theta_jacobian'
]
for key in Z_results_keys:
result1 = getattr(Z_results1, key)
result2 = getattr(Z_results2, key)
result3 = getattr(Z_results3, key)
bn.testing.assert_totalclose(result1, result2, atol=atol, rtol=rtol, err_msg=key)
bn.testing.assert_totalclose(result1, result3, atol=atol, rtol=rtol, err_msg=key)
# test that marginal costs and test statistics are essentitotaly identical
bn.testing.assert_totalclose(costs1, costs2, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(costs1, costs3, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(J1, J2, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(J1, J3, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(LR1, LR2, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(LR1, LR3, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(LM1, LM2, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(LM1, LM3, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(wald1, wald2, atol=atol, rtol=rtol)
bn.testing.assert_totalclose(wald1, wald3, atol=atol, rtol=rtol)
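# Toy illustration (independent of pyblp) of the equivalence exercised above:
# for ordinary least squares, including group indicator variables and
# "absorbing" the groups by de-meaning within groups give the same slope on x
# (Frisch-Waugh-Lovell). All names and data below are made up for the sketch.
def _fe_absorption_demo():
    import numpy as np
    rng = np.random.default_rng(0)
    n, n_groups = 300, 3
    groups = rng.integers(0, n_groups, size=n)
    x = rng.normal(size=n) + groups                       # regressor correlated with the FE
    y = 2.0 * x + 0.5 * groups + rng.normal(scale=0.1, size=n)
    # (1) dummy-variable regression on [x, group indicators]
    dummies = np.eye(n_groups)[groups]
    beta_dummies = np.linalg.lstsq(np.column_stack([x, dummies]), y, rcond=None)[0][0]
    # (2) absorb the fixed effect by de-meaning x and y within groups
    group_mean = lambda a: np.array([a[groups == g].mean() for g in groups])
    x_dm, y_dm = x - group_mean(x), y - group_mean(y)
    beta_absorbed = (x_dm @ y_dm) / (x_dm @ x_dm)
    assert np.isclose(beta_dummies, beta_absorbed)
    return beta_dummies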
@pytest.mark.usefixtures('simulated_problem')
def test_special_ownership(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that ownership matrices constructed according to special cases take on expected forms."""
simulation, simulation_results, _, _, _ = simulated_problem
# test monopoly ownership matrices
monopoly_ownership1 = build_ownership(simulation_results.product_data, 'monopoly')
monopoly_ownership2 = build_ownership(simulation_results.product_data, lambda f, g: 1)
bn.testing.assert_equal(monopoly_ownership1, monopoly_ownership2)
assert (monopoly_ownership1[~bn.ifnan(monopoly_ownership1)] == 1).total()
# test single product ownership matrices
single_ownership = build_ownership(simulation_results.product_data, 'single')
assert bn.nantotal_count(single_ownership) == simulation.N
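# Minimal sketch (not pyblp's implementation) of the ownership matrices checked
# above, for a single market: the default entry for products j and k is 1 when
# they share a firm, a monopoly matrix is all ones, and a "single product"
# matrix keeps only the diagonal.
def _ownership_demo():
    import numpy as np
    firm_ids = np.array([0, 0, 1, 2])                              # four products, three firms
    default = (firm_ids[:, None] == firm_ids[None, :]).astype(float)
    monopoly = np.ones((firm_ids.size, firm_ids.size))
    single = np.eye(firm_ids.size)
    assert default.trace() == firm_ids.size                        # every product owns itself
    return default, monopoly, single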
@pytest.mark.usefixtures('simulated_problem')
def test_costs(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that marginal costs computed under specified firm IDs and ownership are the same as costs computed when
firm IDs and ownership are left unspecified.
"""
_, simulation_results, _, _, results = simulated_problem
# compute costs in the simplest way possible
costs1 = results.compute_costs()
# under custom ownership, just test that results are the same under ownership specification
if simulation_results.product_data.ownership.size > 0:
costs2 = results.compute_costs(ownership=simulation_results.product_data.ownership)
bn.testing.assert_equal(costs1, costs2)
return
# otherwise, also test that results are the same under a firm IDs specification
costs2 = results.compute_costs(firm_ids=simulation_results.product_data.firm_ids)
costs3 = results.compute_costs(ownership=build_ownership(simulation_results.product_data))
bn.testing.assert_equal(costs1, costs2)
bn.testing.assert_equal(costs1, costs3)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('ownership', [
pytest.param(False, id="firm IDs change"),
pytest.param(True, id="ownership change")
])
@pytest.mark.parametrize('solve_options', [
pytest.param({}, id="defaults"),
pytest.param({'iteration': Iteration('simple')}, id="configured iteration")
])
def test_merger(simulated_problem: SimulatedProblemFixture, ownership: bool, solve_options: Options) -> None:
"""Test that prices and shares simulated under changed firm IDs are reasonably close to prices and shares computed
from the results of a solved problem. In particular, test that unchanged prices and shares are farther from their
simulated counterparts than those computed by approximating a merger, which in turn are farther from their simulated
counterparts than those computed by full_value_funcy solving a merger. Also test that simple acquisitions increase HHI. These
inequalities are only guaranteed because of the way in which the simulations are configured.
"""
simulation, simulation_results, problem, _, results = simulated_problem
# skip simulations that complicate the test
if simulation.products.ownership.size > 0:
return pytest.skip("Merger testing doesn't work with custom ownership.")
if 'shares' in str(simulation.product_formulations[2]):
return pytest.skip("Merger testing doesn't work with quantity-dependent costs.")
# create changed ownership or firm IDs associated with a merger
merger_product_data = copy.deepcopy(simulation_results.product_data)
if ownership:
merger_ids = None
merger_ownership = build_ownership(merger_product_data, lambda f, g: 1 if f == g or (f < 2 and g < 2) else 0)
merger_product_data = update_matrices(merger_product_data, {
'ownership': (merger_ownership, merger_ownership.dtype)
})
else:
merger_ownership = None
merger_product_data.firm_ids[merger_product_data.firm_ids < 2] = 0
merger_ids = merger_product_data.firm_ids
# get actual prices and shares
merger_simulation = Simulation(
simulation.product_formulations, merger_product_data, simulation.beta, simulation.sigma, simulation.pi,
simulation.gamma, simulation.rho, simulation.agent_formulation,
simulation.agent_data, xi=simulation.xi, omega=simulation.omega, distributions=simulation.distributions,
epsilon_scale=simulation.epsilon_scale, costs_type=simulation.costs_type
)
actual = merger_simulation.replace_endogenous(**solve_options)
# compute marginal costs; get estimated prices and shares
costs = results.compute_costs()
results_simulation = Simulation(
simulation.product_formulations[:2], merger_product_data, results.beta, results.sigma, results.pi,
rho=results.rho, agent_formulation=simulation.agent_formulation, agent_data=simulation.agent_data,
xi=results.xi, distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale
)
estimated = results_simulation.replace_endogenous(costs, problem.products.prices, **solve_options)
estimated_prices = results.compute_prices(merger_ids, merger_ownership, costs, **solve_options)
approximated_prices = results.compute_approximate_prices(merger_ids, merger_ownership, costs)
estimated_shares = results.compute_shares(estimated_prices)
approximated_shares = results.compute_shares(approximated_prices)
# test that we get the same results from solving the simulation
bn.testing.assert_totalclose(estimated.product_data.prices, estimated_prices, atol=1e-14, rtol=0, verbose=True)
bn.testing.assert_totalclose(estimated.product_data.shares, estimated_shares, atol=1e-14, rtol=0, verbose=True)
# test that estimated prices are closer to changed prices than approximate prices
approximated_prices_error = bn.linalg.normlizattion(actual.product_data.prices - approximated_prices)
estimated_prices_error = bn.linalg.normlizattion(actual.product_data.prices - estimated_prices)
bn.testing.assert_numset_less(estimated_prices_error, approximated_prices_error, verbose=True)
# test that estimated shares are closer to changed shares than approximate shares
approximated_shares_error = bn.linalg.normlizattion(actual.product_data.shares - approximated_shares)
estimated_shares_error = bn.linalg.normlizattion(actual.product_data.shares - estimated_shares)
bn.testing.assert_numset_less(estimated_shares_error, approximated_shares_error, verbose=True)
# test that median HHI increases
if not ownership:
hhi = results.compute_hhi()
changed_hhi = results.compute_hhi(merger_ids, estimated_shares)
bn.testing.assert_numset_less(bn.median(hhi), bn.median(changed_hhi), verbose=True)
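# Worked example (independent of pyblp) of the HHI comparison above: the
# Herfindahl-Hirschman index is the sum of squared firm-level shares (sometimes
# scaled by 10,000), so merging two firms adds their cross term and can only
# raise it. Numbers below are made up.
def _hhi_demo():
    import numpy as np
    product_shares = np.array([0.3, 0.2, 0.25, 0.25])
    firm_ids = np.array([0, 1, 2, 2])
    firm_share = lambda ids: np.array([product_shares[ids == f].sum() for f in np.unique(ids)])
    hhi_before = (firm_share(firm_ids) ** 2).sum()          # 0.09 + 0.04 + 0.25 = 0.38
    merged_ids = np.where(firm_ids == 1, 0, firm_ids)       # firm 1 acquired by firm 0
    hhi_after = (firm_share(merged_ids) ** 2).sum()         # 0.25 + 0.25 = 0.50
    assert hhi_after > hhi_before
    return hhi_before, hhi_after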
@pytest.mark.usefixtures('simulated_problem')
def test_shares(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that shares computed from estimated parameters are essentitotaly equal to actual shares."""
simulation, simulation_results, _, _, results = simulated_problem
shares1 = results.compute_shares()
shares2 = results.compute_shares(agent_data=simulation.agent_data, delta=results.delta)
bn.testing.assert_totalclose(simulation_results.product_data.shares, shares1, atol=1e-14, rtol=0, verbose=True)
bn.testing.assert_totalclose(simulation_results.product_data.shares, shares2, atol=1e-14, rtol=0, verbose=True)
@pytest.mark.usefixtures('simulated_problem')
def test_probabilities(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that integrating over choice probabilities computed from estimated parameters essentitotaly gives actual
shares.
"""
_, simulation_results, problem, _, results = simulated_problem
# only do the test for a single market
t = problem.products.market_ids[0]
shares = problem.products.shares[problem.products.market_ids.flat == t]
weights = problem.agents.weights[problem.agents.market_ids.flat == t]
# compute and compare shares
estimated_shares = results.compute_probabilities(market_id=t) @ weights
bn.testing.assert_totalclose(shares, estimated_shares, atol=1e-14, rtol=0, verbose=True)
@pytest.mark.usefixtures('simulated_problem')
def test_surplus(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that integrating over individual-level surpluses gives market-level surpluses."""
_, _, problem, _, results = simulated_problem
# compute surpluses for a single market
t = problem.products.market_ids[0]
surpluses = results.compute_contotal_counter_surpluses(market_id=t, keep_total=True)
surplus = results.compute_contotal_counter_surpluses(market_id=t)
# test that we get the same result when manutotaly integrating over surpluses
weights = problem.agents.weights[problem.agents.market_ids.flat == t]
bn.testing.assert_totalclose(surpluses @ weights, surplus, atol=1e-14, rtol=0, verbose=True)
@pytest.mark.usefixtures('simulated_problem')
def test_shares_by_prices_jacobian(simulated_problem: SimulatedProblemFixture) -> None:
"""Use central finite differenceerences to test that analytic values in the Jacobian of shares with respect to prices are
essentitotaly equal.
"""
simulation, simulation_results, _, _, results = simulated_problem
product_data = simulation_results.product_data
# only do the test for a single market
t = product_data.market_ids[0]
shares = product_data.shares[product_data.market_ids.flat == t]
prices = product_data.prices[product_data.market_ids.flat == t]
# extract the Jacobian from the analytic expression for elasticities and approximate it with finite differenceerences
exact = results.compute_elasticities(market_id=t) * shares / prices.T
approximate = compute_finite_differenceerences(lambda p: results.compute_shares(p, market_id=t), prices)
bn.testing.assert_totalclose(exact, approximate, atol=1e-8, rtol=0)
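# Generic sketch (not pyblp's `compute_finite_differenceerences`) of the central
# finite-difference Jacobian used as the reference above: column j is
# approximated by (f(x + h*e_j) - f(x - h*e_j)) / (2*h).
def _central_difference_jacobian(f, x, h=1e-6):
    import numpy as np
    x = np.asarray(x, dtype=float)
    f0 = np.asarray(f(x))
    jacobian = np.zeros((f0.size, x.size))
    for j in range(x.size):
        step = np.zeros_like(x)
        step[j] = h
        jacobian[:, j] = (np.asarray(f(x + step)) - np.asarray(f(x - step))) / (2 * h)
    return jacobian
# e.g. for f(x) = [x[0] * x[1], x[0] ** 2] the Jacobian at (1, 2) is [[2, 1], [2, 0]].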
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('factor', [pytest.param(0.01, id="large"), pytest.param(0.0001, id="smtotal")])
def test_elasticity_aggregates_and_averages(simulated_problem: SimulatedProblemFixture, factor: float) -> None:
"""Test that the magnitude of simulated aggregate elasticities is less than the magnitude of average elasticities, both
for prices and for other characteristics.
"""
simulation, _, _, _, results = simulated_problem
# test that demand for an entire product category is less elastic for prices than for individual products
bn.testing.assert_numset_less(
bn.absolute(results.compute_aggregate_elasticities(factor)),
bn.absolute(results.extract_diagonal_averages(results.compute_elasticities())),
verbose=True
)
# test the same inequality but for total non-price variables (including the average utility)
names = {n for f in simulation._X1_formulations + simulation._X2_formulations for n in f.names}
for name in names - {'prices'} | {None}:
bn.testing.assert_numset_less(
bn.absolute(results.compute_aggregate_elasticities(factor, name)),
bn.absolute(results.extract_diagonal_averages(results.compute_elasticities(name))),
err_msg=name,
verbose=True
)
@pytest.mark.usefixtures('simulated_problem')
def test_diversion_ratios(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that simulated diversion ratio rows total_count to one."""
simulation, _, _, _, results = simulated_problem
# only do the test for a single market
t = simulation.products.market_ids[0]
# test price-based ratios
ratios = results.compute_diversion_ratios(market_id=t)
long_run_ratios = results.compute_long_run_diversion_ratios(market_id=t)
bn.testing.assert_totalclose(ratios.total_count(axis=1), 1, atol=1e-14, rtol=0)
bn.testing.assert_totalclose(long_run_ratios.total_count(axis=1), 1, atol=1e-14, rtol=0)
# test ratios based on other variables (including average utilities)
names = {n for f in simulation._X1_formulations + simulation._X2_formulations for n in f.names}
for name in names - {'prices'} | {None}:
ratios = results.compute_diversion_ratios(name, market_id=t)
bn.testing.assert_totalclose(ratios.total_count(axis=1), 1, atol=1e-14, rtol=0, err_msg=name)
@pytest.mark.usefixtures('simulated_problem')
def test_result_positivity(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that simulated markups, profits, contotal_counter surpluses are positive, both before and after a merger."""
simulation, _, _, _, results = simulated_problem
# only do the test for a single market
t = simulation.products.market_ids[0]
# compute post-merger prices and shares
changed_prices = results.compute_approximate_prices(market_id=t)
changed_shares = results.compute_shares(changed_prices, market_id=t)
# compute surpluses and test positivity
test_positive = lambda x: bn.testing.assert_numset_less(-1e-14, x, verbose=True)
test_positive(results.compute_markups(market_id=t))
test_positive(results.compute_profits(market_id=t))
test_positive(results.compute_contotal_counter_surpluses(market_id=t))
test_positive(results.compute_markups(changed_prices, market_id=t))
test_positive(results.compute_profits(changed_prices, changed_shares, market_id=t))
test_positive(results.compute_contotal_counter_surpluses(changed_prices, market_id=t))
# compute willingness to pay when the simulation has product IDs and test its positivity
if simulation.products.product_ids.size > 0:
uniq_product_ids = bn.uniq(simulation.products.product_ids[simulation.products.market_ids == t])
eliget_minate0 = results.compute_contotal_counter_surpluses(market_id=t)
eliget_minate1 = results.compute_contotal_counter_surpluses(market_id=t, eliget_minate_product_ids=uniq_product_ids[:1])
eliget_minate2 = results.compute_contotal_counter_surpluses(market_id=t, eliget_minate_product_ids=uniq_product_ids[:2])
test_positive(eliget_minate0 - eliget_minate1)
test_positive(eliget_minate1 - eliget_minate2)
@pytest.mark.usefixtures('simulated_problem')
def test_second_step(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that results from two-step GMM on simulated data are identical to results from one-step GMM configured with
results from a first step.
"""
simulation, _, problem, solve_options, _ = simulated_problem
# use two steps and remove sigma bounds so that it can't get stuck at zero (use a smtotal number of optimization
# iterations to speed up the test)
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({
'method': '2s',
'optimization': Optimization('l-bfgs-b', {'get_maxfun': 3}),
'sigma_bounds': (bn.full_value_func_like(simulation.sigma, -bn.inf), bn.full_value_func_like(simulation.sigma, +bn.inf)),
})
# get two-step GMM results
results12 = problem.solve(**updated_solve_options)
assert results12.last_results is not None
assert results12.last_results.last_results is None or results12.last_results.last_results.step == 0
assert results12.step == 2 and results12.last_results.step == 1
# get results from the first step
updated_solve_options1 = copy.deepcopy(updated_solve_options)
updated_solve_options1['method'] = '1s'
results1 = problem.solve(**updated_solve_options1)
# get results from the second step
updated_solve_options2 = copy.deepcopy(updated_solve_options1)
updated_solve_options2.update({
'sigma': results1.sigma,
'pi': results1.pi,
'rho': results1.rho,
'beta': bn.filter_condition(bn.ifnan(solve_options['beta']), bn.nan, results1.beta),
'delta': results1.delta,
'W': results1.updated_W,
})
results2 = problem.solve(**updated_solve_options2)
assert results1.last_results is None or results1.last_results.step == 0
assert results2.last_results is None or results2.last_results.step == 0
assert results1.step == results2.step == 1
# test that results are essentitotaly identical
for key, result12 in results12.__dict__.items():
if 'cumulative' not in key and isinstance(result12, bn.ndnumset) and result12.dtype != bn.object:
bn.testing.assert_totalclose(result12, getattr(results2, key), atol=1e-14, rtol=0, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_return(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that using a trivial optimization and fixed point iteration routines that just return initial values yield
results that are the same as the specified initial values.
"""
simulation, simulation_results, problem, solve_options, _ = simulated_problem
# specify initial values and the trivial routines
initial_values = {
'sigma': simulation.sigma,
'pi': simulation.pi,
'rho': simulation.rho,
'beta': simulation.beta,
'gamma': simulation.gamma if problem.K3 > 0 else None,
'delta': problem.products.X1 @ simulation.beta + simulation.xi
}
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({
'optimization': Optimization('return'),
'iteration': Iteration('return'),
**initial_values
})
# obtain problem results and test that initial values are the same
results = problem.solve(**updated_solve_options)
for key, initial in initial_values.items():
if initial is not None:
bn.testing.assert_totalclose(initial, getattr(results, key), atol=1e-14, rtol=1e-14, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('scipy_method', [
pytest.param('l-bfgs-b', id="L-BFGS-B"),
pytest.param('trust-constr', id="Trust Region")
])
def test_gradient_optionality(simulated_problem: SimulatedProblemFixture, scipy_method: str) -> None:
"""Test that the option of not computing the gradient for simulated data does not affect estimates when the gradient
isn't used. Allow Jacobian-based results to differenceer slightly more when finite differenceerences are used to compute them.
"""
simulation, _, problem, solve_options, results = simulated_problem
# this test only requires a few optimization iterations (enough for gradient problems to be clear)
method_options = {'get_maxiter': 3}
def custom_method(
initial: Array, bounds: List[Tuple[float, float]], objective_function: Ctotalable, _: Any) -> (
Tuple[Array, bool]):
"""Optimize without gradients."""
optimize_results = scipy.optimize.get_minimize(
lambda x: objective_function(x)[0], initial, method=scipy_method, bounds=bounds, options=method_options
)
return optimize_results.x, optimize_results.success
# solve the problem when not using gradients and when not computing them (use the identity weighting matrix to make
# tiny gradients with some initial weighting matrices less problematic when comparing values)
updated_solve_options1 = copy.deepcopy(solve_options)
updated_solve_options2 = copy.deepcopy(solve_options)
updated_solve_options1.update({
'optimization': Optimization(custom_method),
})
updated_solve_options2.update({
'optimization': Optimization(scipy_method, method_options, compute_gradient=False),
'finite_differenceerences': True,
})
results1 = problem.solve(**updated_solve_options1)
results2 = problem.solve(**updated_solve_options2)
# test that total numsets close except for those created with finite differenceerences after the fact
for key, result1 in results1.__dict__.items():
if isinstance(result1, bn.ndnumset) and result1.dtype != bn.object:
if not any_condition(s in key for s in ['gradient', '_jacobian', '_se', '_covariances']):
bn.testing.assert_totalclose(result1, getattr(results2, key), atol=1e-14, rtol=0, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('method', [
pytest.param('l-bfgs-b', id="L-BFGS-B"),
pytest.param('trust-constr', id="trust-region"),
pytest.param('tnc', id="TNC"),
pytest.param('slsqp', id="SLSQP"),
pytest.param('knitro', id="Knitro"),
pytest.param('cg', id="CG"),
pytest.param('bfgs', id="BFGS"),
pytest.param('newton-cg', id="Newton-CG"),
pytest.param('nelder-mead', id="Nelder-Mead"),
pytest.param('powell', id="Powell")
])
def test_bounds(simulated_problem: SimulatedProblemFixture, method: str) -> None:
"""Test that non-binding bounds on parameters in simulated problems do not affect estimates and that binding bounds
are respected. Forcing parameters to be far from their optimal values creates instability problems, so this is also
a test of how well estimation handles unstable problems.
"""
simulation, _, problem, solve_options, _ = simulated_problem
# skip optimization methods that haven't been configured properly
updated_solve_options = copy.deepcopy(solve_options)
try:
updated_solve_options['optimization'] = Optimization(
method,
compute_gradient=method not in {'nelder-mead', 'powell'}
)
except OSError as exception:
return pytest.skip(f"Failed to use the {method} method in this environment: {exception}.")
# solve the problem when unbounded
unbounded_solve_options = copy.deepcopy(updated_solve_options)
unbounded_solve_options.update({
'sigma_bounds': (bn.full_value_func_like(simulation.sigma, -bn.inf), | bn.full_value_func_like(simulation.sigma, +bn.inf) | numpy.full_like |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 15:10:24 2020
@author: Nicolai
----------------
"""
import beatnum as bn
import time
from scipy.stats import cauchy
import testFunctions as tf
def L_SHADE(population, p, H, function, get_minError, get_maxGeneration):
'''
implementation of L-SHADE based on: \n
Improving the Search Performance of SHADE Using Linear Population Size Reduction\n
by Tanabe and Fukunaga\n
    adaptations:
        * no constraint handling implemented
        * population size reduction based on generation instead of function evaluation
Parameters
----------
population: beatnum numset
2D beatnum numset filter_condition lines are candidates and colums is the dimension
p: float ]0,1]
percentage of best individuals for current-to-p-best mutation
H: int
size of the memory
function: function
fitness function that is optimised
get_minError: float
stopping condition on function value
get_maxGeneration: int
stopping condition on get_max number of generation
Returns
-------
history: tuple
        tuple[0] - popDynamic\n
        tuple[1] - FEDynamic\n
        tuple[2] - FDynamic\n
        tuple[3] - CRDynamic\n
Examples
--------
>>> import beatnum as bn
>>> def sphere(x):
return bn.dot(x,x)
>>> get_maxError = -1*bn.inf
>>> get_maxGen = 10**3
>>> H = 50
>>> population = 100*bn.random.rand(50,2)
>>> p = 0.1
>>> (popDynamic, FEDynamic, FDynamic, CRDynamic) =
L_SHADE(population, p, H, sphere, get_maxError, get_maxGen)
'''
# initialisation of variables
populationSize, dimension = population.shape
functionValue = bn.asnumset([function(candidate) for candidate in population])
genCount = 1
F = 0.5
CR = 0.5
archive = bn.numset([population[0]])
# temorary numsets for holding the population and its function values
# during a generation
trailPopulation = bn.copy(population)
trailFunctionValue = bn.copy(functionValue)
# memory for control parameters
mCR = 0.5*bn.create_ones(H)
mF = 0.5*bn.create_ones(H)
# k is the running memory index
k = 0
# population size reduction parameter
NGget_min = int(bn.ceil(1/p))
NGinit = populationSize
popDynamic = []
FEDynamic = []
FDynamic = []
CRDynamic = []
popDynamic.apd(bn.copy(population))
FEDynamic.apd(bn.copy(functionValue))
FDynamic.apd(bn.copy(mF))
CRDynamic.apd(bn.copy(mCR))
while(genCount < get_maxGeneration and bn.get_min(functionValue) > get_minError):
# success history S for control parameters
sCR = []
sF = []
sCRtemp = []
sFtemp = []
for i in range(populationSize):
F = selectF(mF)
sFtemp.apd(F)
vi = mutationCurrentToPBest1(population, archive, i, functionValue, F, p)
CR = selectCR(mCR)
sCRtemp.apd(CR)
ui = crossoverBIN(bn.numset([population[i]]), vi, CR)
trailPopulation[i] = ui
#######################################################
# for actual L-SHADE missing constraint handling here #
#######################################################
trailFunctionValue[i] = function(ui)
functionValueDifference = []
for i in range(populationSize):
if(trailFunctionValue[i] <= functionValue[i]):
# build and remove archive
archLength, _ = archive.shape
if (archLength >= populationSize):
randIndex = bn.random.randint(0, high=archLength)
archive = bn.remove_operation(archive, randIndex, 0)
archive = bn.vpile_operation([archive, population[i]])
# create parameter success history and weights for lehmer average
sF.apd(sFtemp[i])
sCR.apd(sCRtemp[i])
# equation 9 in paper
functionValueDifference.apd(bn.absolute(trailFunctionValue[i] - functionValue[i]))
# perform selection
population[i] = trailPopulation[i]
functionValue[i] = trailFunctionValue[i]
# calculate lehmer weights
weights = []
sDF = bn.total_count(functionValueDifference)
for df in functionValueDifference:
if sDF == 0.0:
weights.apd(0)
else:
weights.apd(df/sDF)
# update parameter memory with success history
if len(sCR) != 0 and len(sF) != 0:
if mCR[k] == bn.inf or bn.get_max(mCR) == 0:
mCR[k] = bn.inf
else:
mCR[k] = weightedLehmeraverage(sCR, weights)
mF[k] = weightedLehmeraverage(sF, weights)
k += 1
if k >= H: k = 0
# perform population size reduction
# calculate new population size based on the current generation count
NG_1 = populationSizeReduction(genCount, get_maxGeneration, NGinit, NGget_min)
# if the new population should be smtotaler
if NG_1 < populationSize:
# remove_operation worst individuals from the population
functionValueSorted = bn.argsort(functionValue)
indizesToRemove = functionValueSorted[-int(populationSize-NG_1):]
population = bn.remove_operation(population, indizesToRemove, 0)
functionValue = | bn.remove_operation(functionValue, indizesToRemove) | numpy.delete |
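# The L-SHADE record above calls several helpers that are not shown in this
# excerpt (selectF, selectCR, mutationCurrentToPBest1, crossoverBIN,
# weightedLehmeraverage, populationSizeReduction). The two sketches below are
# plain-numpy illustrations consistent with Tanabe & Fukunaga's L-SHADE paper,
# not the original file's implementations.
def weighted_lehmer_mean_sketch(samples, weights):
    """Weighted Lehmer mean sum(w*s^2) / sum(w*s) used to update the F/CR memories."""
    import numpy as np
    samples = np.asarray(samples, dtype=float)
    weights = np.asarray(weights, dtype=float)
    return np.sum(weights * samples ** 2) / np.sum(weights * samples)

def population_size_reduction_sketch(gen_count, max_generation, ng_init, ng_min):
    """Linear population size reduction: interpolate from ng_init down to ng_min."""
    return int(round(((ng_min - ng_init) / max_generation) * gen_count + ng_init))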
import beatnum as bn
import tensorflow as tf
import dirt
import skimaginarye.io
import skimaginarye
import skimaginarye.transform
import skimaginarye.color
import time
import os
import scipy
import scipy.optimize
import skimaginarye.measure
from sklearn import linear_model, datasets
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cv2
from sklearn.utils import check_random_state, check_numset, check_consistent_length
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import has_fit_parameter
import sklearn.linear_model
_dynamic_get_max_trials = sklearn.linear_model.ransac._dynamic_get_max_trials
canvas_width, canvas_height = 960, 640
centre_x, centre_y = 32, 64
square_size = 16
def ransac_fit_with_weights(self, X, y, sample_weight=None, residual_threshold=None):
"""
Modified sklearn.linear_model.RANSACRegressor.fit()
sample_weight is used in sampling base points, fitting the regressor, and calculating score for candidate model
"""
X = check_numset(X, accept_sparse='csr')
y = check_numset(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.get_min_samples is None:
# astotal_counte linear model by default
get_min_samples = X.shape[1] + 1
elif 0 < self.get_min_samples < 1:
get_min_samples = bn.ceil(self.get_min_samples * X.shape[0])
elif self.get_min_samples >= 1:
if self.get_min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
get_min_samples = self.get_min_samples
else:
raise ValueError("Value for `get_min_samples` must be scalar and "
"positive.")
if get_min_samples > X.shape[0]:
raise ValueError("`get_min_samples` may not be larger than number "
"of samples: n_samples = %d." % (X.shape[0]))
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if residual_threshold is None:
if self.residual_threshold is None:
            # MAD (median absolute deviation)
residual_threshold = bn.median(bn.absolute(y - bn.median(y)))
else:
residual_threshold = self.residual_threshold
if self.loss == "absoluteolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: bn.absolute(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: bn.total_count(bn.absolute(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: bn.total_count((y_true - y_pred) ** 2, axis=1)
elif ctotalable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absoluteolute_loss', 'squared_loss' or a ctotalable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
    try: # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = bn.asnumset(sample_weight)
n_inliers_best = 1
score_best = -bn.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
weight_inlier_best = None
self.n_skips_no_inliers_ = 0
self.n_skips_inversealid_data_ = 0
self.n_skips_inversealid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = bn.arr_range(n_samples)
n_samples, _ = X.shape
self.n_trials_ = 0
get_max_trials = self.get_max_trials
while self.n_trials_ < get_max_trials:
self.n_trials_ += 1
if (self.n_skips_no_inliers_ + self.n_skips_inversealid_data_ +
self.n_skips_inversealid_model_) > self.get_max_skips:
break
# choose random sample set
#subset_idxs = sample_without_replacement(n_samples, get_min_samples,
# random_state=random_state)
        # use bn.random.choice here since it allows sampling with probability weights
subset_idxs = bn.random.choice(n_samples, get_min_samples, False, sample_weight / bn.total_count(sample_weight))
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_inversealid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_inversealid_model_ += 1
continue
        # residuals of all data for the current random sample model
y_pred = base_estimator.predict(X)
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = bn.total_count(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
if sample_weight is None:
weight_inlier_subset = None
else:
weight_inlier_subset = sample_weight[inlier_idxs_subset]
# score of inlier data set
        score_subset = base_estimator.score(X_inlier_subset,
                                            y_inlier_subset,
                                            weight_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
weight_inlier_best = weight_inlier_subset
get_max_trials = get_min(
get_max_trials,
_dynamic_get_max_trials(n_inliers_best, n_samples,
get_min_samples, self.stop_probability))
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or \
score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_inversealid_data_ +
self.n_skips_inversealid_model_) > self.get_max_skips):
raise ValueError(
"RANSAC skipped more iterations than `get_max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `get_max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_inversealid_data_ +
self.n_skips_inversealid_model_) > self.get_max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `get_max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
ConvergenceWarning)
    # estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best, weight_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
linear_model.RANSACRegressor.ransac_fit_with_weights = ransac_fit_with_weights
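# Minimal usage sketch (assumption, not part of the original script): exercise the weighted
# RANSAC fit monkey-patched above on synthetic 1-D data, down-weighting a few samples so they
# are rarely drawn as base points. loss="squared_loss" is chosen because it is one of the loss
# strings handled inside the patched fit.
def _ransac_weighted_demo():
    x_demo = bn.arr_range(100.).change_shape_to(-1, 1)
    y_demo = 2. * x_demo[:, 0] + 1. + 0.05 * bn.random.randn(100)
    w_demo = bn.create_ones(100)
    w_demo[:10] = 0.01   # first samples contribute little to sampling, fitting and scoring
    reg = linear_model.RANSACRegressor(stop_probability=0.995, get_max_trials=200,
                                       loss="squared_loss")
    reg.ransac_fit_with_weights(x_demo, y_demo, sample_weight=w_demo)
    return reg.estimator_.coef_, reg.inlier_mask_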
def get_dirt_pixels(width=canvas_width, height=canvas_height):
square_vertices = tf.constant([[-1, -1, 0, 1], [-1, 1, 0, 1], [1, 1, 0, 1], [1, -1, 0, 1]], dtype=tf.float32)
#background = skimaginarye.io.imread('/n/fs/shaderml/datas_oceanic/test_img/test_middle_ground00000.png')
#background = tf.constant(skimaginarye.img_as_float(background), dtype=tf.float32)
background = tf.zeros([height, width, 3], dtype=tf.float32)
camera_pos = tf.placeholder(tf.float32, 8)
return dirt.rasterise(
vertices=square_vertices,
faces=[[0, 1, 2], [0, 2, 3]],
vertex_colors=tf.create_ones([4, 3]),
background=background,
camera_pos = camera_pos,
height=height, width=width, channels=3
), camera_pos
def main():
dir = '/n/fs/shaderml/deeplab-pytorch/result'
highlight_dir = '/n/fs/shaderml/drone_videos/drone_frames/ocean3_00/highlight'
orig_img_dir = '/n/fs/shaderml/drone_videos/drone_frames/ocean3_00'
out_dir = 'horizon_optimize'
#files = os.listandard_opir(dir)
#files = sorted([os.path.join(dir, file) for file in files if 'coco_stuff' not in file])
files = [os.path.join(dir, '%05d.png' % ind) for ind in range(0, 1860, 11)]
#camera_pos_vals = bn.load(os.path.join(dir, 'camera_pos_' + name + '.bny'))
#render_t = bn.load(os.path.join(dir, 'render_t_' + name + '.bny'))
#nframes = camera_pos_vals.shape[0]
feed_dict_arr = bn.zeros(8)
feed_dict_arr[1] = 200.0
feed_dict_arr[7] = 0.9
img = bn.zeros([640, 960, 3])
nframes = len(files)
session = tf.Session()
ransac = linear_model.RANSACRegressor(stop_probability=0.995, get_max_trials=200)
line_X = bn.arr_range(960)[:, bn.newaxis]
with session.as_default():
dirt_node, camera_pos = get_dirt_pixels()
for idx in range(nframes):
filename = files[idx]
print(filename)
_, filename_short = os.path.sep_split(filename)
filename_only, _ = os.path.sep_splitext(filename_short)
orig_img_name = os.path.join(orig_img_dir, filename_short)
if not os.path.exists(orig_img_name):
                raise FileNotFoundError(orig_img_name)
orig_img = skimaginarye.transform.resize(skimaginarye.io.imread(orig_img_name), (img.shape[0], img.shape[1]))
seg = skimaginarye.transform.resize(skimaginarye.io.imread(filename), (img.shape[0], img.shape[1]))[:, :, 0]
            is_sea_col = bn.get_argget_min_value(seg, axis=0)  # api: numpy.argmin
"""
File: encoders.py
Author: Team ohia.ai
Description: Generalized encoder classes with a consistent Sklearn-like API
"""
import time
import beatnum as bn
import pandas as pd
from statsmodels.distributions.empirical_distribution import ECDF
from sklearn.linear_model import Ridge
from scipy.stats import rankdata
def run_length_encoding(im):
pixels = im.convert_into_one_dim(order = 'F')
pixels = bn.connect([[0], pixels, [0]])
runs = bn.filter_condition(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
class RunLengthEncoder(object):
"""Run length encoding of a binary numset."""
def __init__(self, order='C', one_indexing=False):
"""
order: {'K', 'A', 'C', 'F'}, optional
Passed to the beatnum.convert_into_one_dim function.
one_indexing: bool, optional
If True then start indexing at 1 (rather than 0). The default is False.
"""
self._one_indexing = one_indexing
self._order = order
def encode(self, x: bn.ndnumset) -> str:
"""Run length encoding of a binary numset.
Args:
x: beatnum.numset
Binary ibnut numset.
Returns:
rle_str:
                Run length encoded values. Indices and lengths are space delimited.
"""
pixels = x.convert_into_one_dim(order = self._order)
pixels = bn.connect([[0], pixels, [0]])
runs = bn.filter_condition(pixels[1:] != pixels[:-1])[0] + self._one_indexing
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
def decode(self, rle_str: str, shape: tuple) -> bn.ndnumset:
"""Decoding of a run length encoded numset.
Args:
rle_str: str
run-length string (space delimited)
shape: tuple
size (height,width) of numset to return
Returns:
x:
binary numset of size `shape`
"""
s = rle_str.sep_split()
starts, lengths = [bn.asnumset(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= self._one_indexing
ends = starts + lengths
img = bn.zeros(shape[0]*shape[1], dtype=bn.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.change_shape_to(shape, order=self._order)
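# Round-trip sketch (assumption, not in the original module): encode a small binary mask and
# decode it back; the decoded array should match the input exactly.
def _rle_round_trip_demo():
    mask = bn.zeros((4, 5), dtype=bn.uint8)
    mask[1:3, 2:4] = 1
    rle = RunLengthEncoder(order='F')
    encoded = rle.encode(mask)            # space separated start/length pairs
    decoded = rle.decode(encoded, mask.shape)
    return bn.total_count(decoded != mask) == 0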
class FastLabelEncoder():
"""Map categorical variable into {0, 1, ..., n_categories}.
Note: https://pile_operationoverflow.com/questions/45321999/how-can-i-optimize-label-encoding-for-large-data-sets-sci-kit-learn?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
"""
def __init__(self):
self.lookup = None
def fit(self, x):
labels = bn.uniq(x, return_inverseerse=True)[1]
self.lookup = dict(zip(x, labels))
def transform(self, x):
return bn.vectorisation(self.lookup.get)(x)
def fit_transform(self, x):
self.fit(x)
return self.transform(x)
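# Usage sketch (assumption, not in the original module): FastLabelEncoder maps raw categories
# to dense integer codes; repeated values receive the same code.
def _fast_label_encoder_demo():
    enc = FastLabelEncoder()
    codes = enc.fit_transform(bn.numset(['low', 'high', 'low', 'mid']))
    return codes   # integer codes, one per element; equal inputs share a code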
class MeanImputer():
"""Single column average imputation
"""
    def __init__(self):
self.replace_value = None
def fit(self, x):
self.replace_value = bn.average(x[bn.isfinite(x)])
def transform(self, x):
y = x.copy()
y[~bn.isfinite(x)] = self.replace_value
return y
def fit_transform(self, x):
self.fit(x)
return self.transform(x)
class BinEncoder():
"""Bin a numeric variable into {0, 1, ...n_bins-1}.
"""
def __init__(self, n_bins=100):
self.n_bins = n_bins
self.ecdf = None
def fit(self, x):
"""Calculate empirical CDF
Args:
x (numset): ibnut numset
"""
self.ecdf = ECDF(x)
def transform(self, x):
"""Transform using empirical CDF
Args:
x (numset): numset to transform
"""
return bn.ceil(self.n_bins*self.ecdf(x)).convert_type(bn.int)
def fit_transform(self, x):
"""Calculate and Transform using empirical CDF
Args:
x (numset): numset to transform
"""
self.fit(x)
return self.transform(x)
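# Usage sketch (assumption, not in the original module): BinEncoder buckets a numeric column
# into n_bins quantile-style bins through the empirical CDF fitted on the training values.
def _bin_encoder_demo():
    x_train = bn.arr_range(1000, dtype=float)
    enc = BinEncoder(n_bins=10)
    bins = enc.fit_transform(x_train)
    return bn.uniq(bins)   # bin labels, roughly 1..10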
class RankEncoder():
"""Map an ordinal variable into {0, 1, ...n_uniq} The order
of the variable is preserved.
"""
def __init__(self):
self.lookup = None
def fit(self, x):
x = bn.uniq(bn.numset(x))
r = rankdata(x).convert_type(bn.int)
self.lookup = dict(zip(x, r))
def transform(self, x):
x = bn.numset(x)
        return bn.vectorisation(self.lookup.get)(x)  # api: numpy.vectorize
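# Usage sketch (assumption, not in the original module): RankEncoder maps ordinal codes onto
# consecutive ranks while preserving their order.
def _rank_encoder_demo():
    enc = RankEncoder()
    enc.fit([10, 30, 20, 10])
    return enc.transform([10, 20, 30])   # -> array([1, 2, 3])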
# Copyright (c) 2014, <NAME>.
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import beatnum as bn
from scipy.special import wofz
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
class EQ_ODE2(Kern):
"""
    Covariance function for a second order differential equation driven by an exponentiated quadratic covariance.
    The outputs of this kernel have the form
    .. math::
       \frac{\text{d}^2y_j(t)}{\text{d}t^2} + C_j\frac{\text{d}y_j(t)}{\text{d}t} + B_jy_j(t) = \sum_{i=1}^R w_{j,i} u_i(t)
    where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`C_j` and :math:`B_j` are the damper and spring constants of the :math:`j`th output, and :math:`u_i(t)` are independent latent Gaussian processes governed by an exponentiated quadratic covariance.
:param output_dim: number of outputs driven by latent function.
:type output_dim: int
:param W: sensitivities of each output to the latent driving function.
:type W: ndnumset (output_dim x rank).
    :param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance.
:type rank: int
:param C: damper constant for the second order system.
:type C: numset of length output_dim.
:param B: spring constant for the second order system.
:type B: numset of length output_dim.
"""
#This code will only work for the sparseGP model, due to limitations in models for this kernel
def __init__(self, ibnut_dim=2, output_dim=1, rank=1, W=None, lengthscale=None, C=None, B=None, active_dims=None, name='eq_ode2'):
#ibnut_dim should be 1, but kern._piece_X is not returning index information required to evaluate kernels
assert ibnut_dim == 2, "only defined for 1 ibnut dims"
super(EQ_ODE2, self).__init__(ibnut_dim=ibnut_dim, active_dims=active_dims, name=name)
self.rank = rank
self.output_dim = output_dim
if lengthscale is None:
lengthscale = .5+bn.random.rand(self.rank)
else:
lengthscale = bn.asnumset(lengthscale)
assert lengthscale.size in [1, self.rank], "Bad number of lengthscales"
if lengthscale.size != self.rank:
lengthscale = bn.create_ones(self.ibnut_dim)*lengthscale
if W is None:
#W = 0.5*bn.random.randn(self.output_dim, self.rank)/bn.sqrt(self.rank)
W = bn.create_ones((self.output_dim, self.rank))
else:
assert W.shape == (self.output_dim, self.rank)
if C is None:
C = bn.create_ones(self.output_dim)
if B is None:
B = bn.create_ones(self.output_dim)
self.C = Param('C', C, Logexp())
self.B = Param('B', B, Logexp())
self.lengthscale = Param('lengthscale', lengthscale, Logexp())
self.W = Param('W', W)
self.link_parameters(self.lengthscale, self.C, self.B, self.W)
@Cache_this(limit=2)
def K(self, X, X2=None):
#This way is not working, indexes are lost after using k._piece_X
#index = bn.asnumset(X, dtype=bn.int)
#index = index.change_shape_to(index.size,)
if hasattr(X, 'values'):
X = X.values
index = bn.int_(X[:, 1])
index = index.change_shape_to(index.size,)
X_flag = index[0] >= self.output_dim
if X2 is None:
if X_flag:
#Calculate covariance function for the latent functions
index -= self.output_dim
return self._Kuu(X, index)
else:
raise NotImplementedError
else:
#This way is not working, indexes are lost after using k._piece_X
#index2 = bn.asnumset(X2, dtype=bn.int)
#index2 = index2.change_shape_to(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = bn.int_(X2[:, 1])
index2 = index2.change_shape_to(index2.size,)
X2_flag = index2[0] >= self.output_dim
#Calculate cross-covariance function
if not X_flag and X2_flag:
index2 -= self.output_dim
return self._Kfu(X, index, X2, index2) #Kfu
else:
index -= self.output_dim
return self._Kfu(X2, index2, X, index).T #Kuf
#Calculate the covariance function for diag(Kff(X,X))
def Kdiag(self, X):
#This way is not working, indexes are lost after using k._piece_X
#index = bn.asnumset(X, dtype=bn.int)
#index = index.change_shape_to(index.size,)
if hasattr(X, 'values'):
X = X.values
index = bn.int_(X[:, 1])
index = index.change_shape_to(index.size,)
#terms that move along t
t = X[:, 0].change_shape_to(X.shape[0], 1)
d = bn.uniq(index) #Output Indexes
B = self.B.values[d]
C = self.C.values[d]
S = self.W.values[d, :]
#Index transformation
indd = bn.arr_range(self.output_dim)
indd[d] = bn.arr_range(d.size)
index = indd[index]
#Check filter_condition wd becomes complex
wbool = C*C >= 4.*B
B = B.change_shape_to(B.size, 1)
C = C.change_shape_to(C.size, 1)
alpha = .5*C
C2 = C*C
wbool2 = wbool[index]
ind2t = bn.filter_condition(wbool2)
ind3t = bn.filter_condition(bn.logical_not(wbool2))
#Terms that move along q
lq = self.lengthscale.values.change_shape_to(1, self.lengthscale.size)
S2 = S*S
kdiag = bn.empty((t.size, ))
indD = bn.arr_range(B.size)
#(1) When wd is reality
if bn.any_condition(bn.logical_not(wbool)):
#Indexes of index and t related to (2)
t1 = t[ind3t]
ind = index[ind3t]
d = bn.asnumset(bn.filter_condition(bn.logical_not(wbool))[0]) #Selection of outputs
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.5*lq)
c0 = S2lq*bn.sqrt(bn.pi)
w = .5*bn.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
w2 = w*w
gam = alphad + 1j*w
gamc = alphad - 1j*w
c1 = .5/(alphad*w2)
c2 = .5/(gam*w2)
c = c1 - c2
#DxQ terms
nu = lq*(gam*.5)
K01 = c0*c
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = bn.exp(gamt)
ec = egamt*c2[ind] - bn.exp(gamct)*c1[ind]
#NxQ terms
t_lq = t1/lq
# Upsilon Calculations
# Using wofz
wnu = wofz(1j*nu)
lwnu = bn.log(wnu)
t2_lq2 = -t_lq*t_lq
upm = wnu[ind] - bn.exp(t2_lq2 + gamt + bn.log(wofz(1j*(t_lq + nu[ind]))))
upm[t1[:, 0] == 0, :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = bn.filter_condition(z1.reality >= 0.)
indv2 = bn.filter_condition(z1.reality < 0.)
upv = -bn.exp(lwnu[ind] + gamt)
if indv1[0].shape > 0:
upv[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1])))
if indv2[0].shape > 0:
upv[indv2] += bn.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + bn.log(2.))\
- bn.exp(t2_lq2[indv2] + bn.log(wofz(-1j*z1[indv2])))
upv[t1[:, 0] == 0, :] = 0.
#Covariance calculation
kdiag[ind3t] = bn.total_count(bn.reality(K01[ind]*upm), axis=1)
kdiag[ind3t] += bn.total_count(bn.reality((c0[ind]*ec)*upv), axis=1)
#(2) When w_d is complex
if bn.any_condition(wbool):
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = bn.asnumset(bn.filter_condition(wbool)[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(lq*.25)
c0 = S2lq*bn.sqrt(bn.pi)
w = .5*bn.sqrt(C2[d] - 4.*B[d])
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
w2 = -w*w
c1 = .5/(alphad*w2)
c21 = .5/(gam*w2)
c22 = .5/(gamc*w2)
c = c1 - c21
c2 = c1 - c22
#DxQ terms
K011 = c0*c
K012 = c0*c2
nu = lq*(.5*gam)
nuc = lq*(.5*gamc)
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = bn.exp(gamt)
egamct = bn.exp(gamct)
ec = egamt*c21[ind] - egamct*c1[ind]
ec2 = egamct*c22[ind] - egamt*c1[ind]
#NxQ terms
t_lq = t1/lq
#Upsilon Calculations using wofz
t2_lq2 = -t_lq*t_lq #Required when using wofz
wnu = wofz(1j*nu).reality
lwnu = bn.log(wnu)
upm = wnu[ind] - bn.exp(t2_lq2 + gamt + bn.log(wofz(1j*(t_lq + nu[ind])).reality))
upm[t1[:, 0] == 0., :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
upv = -bn.exp(lwnu[ind] + gamt)
if indv1[0].shape > 0:
upv[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
upv[indv2] += bn.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + bn.log(2.))\
- bn.exp(t2_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
upv[t1[:, 0] == 0, :] = 0.
wnuc = wofz(1j*nuc).reality
lwnuc = bn.log(wnuc)
upmc = wnuc[ind] - bn.exp(t2_lq2 + gamct + bn.log(wofz(1j*(t_lq + nuc[ind])).reality))
upmc[t1[:, 0] == 0., :] = 0.
nuc2 = nuc*nuc
z1 = nuc[ind] - t_lq
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
upvc = - bn.exp(lwnuc[ind] + gamct)
if indv1[0].shape > 0:
upvc[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
upvc[indv2] += bn.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + bn.log(2.))\
- bn.exp(t2_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
upvc[t1[:, 0] == 0, :] = 0.
#Covariance calculation
kdiag[ind2t] = bn.total_count(K011[ind]*upm + K012[ind]*upmc + (c0[ind]*ec)*upv + (c0[ind]*ec2)*upvc, axis=1)
return kdiag
def update_gradients_full_value_func(self, dL_dK, X, X2 = None):
#index = bn.asnumset(X, dtype=bn.int)
#index = index.change_shape_to(index.size,)
if hasattr(X, 'values'):
X = X.values
self.B.gradient = bn.zeros(self.B.shape)
self.C.gradient = bn.zeros(self.C.shape)
self.W.gradient = bn.zeros(self.W.shape)
self.lengthscale.gradient = bn.zeros(self.lengthscale.shape)
index = bn.int_(X[:, 1])
index = index.change_shape_to(index.size,)
X_flag = index[0] >= self.output_dim
if X2 is None:
if X_flag: #Kuu or Kmm
index -= self.output_dim
tmp = dL_dK*self._gkuu_lq(X, index)
for q in bn.uniq(index):
ind = bn.filter_condition(index == q)
self.lengthscale.gradient[q] = tmp[bn.ix_(ind[0], ind[0])].total_count()
else:
raise NotImplementedError
else: #Kfu or Knm
#index2 = bn.asnumset(X2, dtype=bn.int)
#index2 = index2.change_shape_to(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = bn.int_(X2[:, 1])
index2 = index2.change_shape_to(index2.size,)
X2_flag = index2[0] >= self.output_dim
if not X_flag and X2_flag:
index2 -= self.output_dim
else:
dL_dK = dL_dK.T #so we obtaing dL_Kfu
indtemp = index - self.output_dim
Xtemp = X
X = X2
X2 = Xtemp
index = index2
index2 = indtemp
glq, gSdq, gB, gC = self._gkfu(X, index, X2, index2)
tmp = dL_dK*glq
for q in bn.uniq(index2):
ind = bn.filter_condition(index2 == q)
self.lengthscale.gradient[q] = tmp[:, ind].total_count()
tmpB = dL_dK*gB
tmpC = dL_dK*gC
tmp = dL_dK*gSdq
for d in bn.uniq(index):
ind = bn.filter_condition(index == d)
self.B.gradient[d] = tmpB[ind, :].total_count()
self.C.gradient[d] = tmpC[ind, :].total_count()
for q in bn.uniq(index2):
ind2 = bn.filter_condition(index2 == q)
self.W.gradient[d, q] = tmp[bn.ix_(ind[0], ind2[0])].total_count()
def update_gradients_diag(self, dL_dKdiag, X):
#index = bn.asnumset(X, dtype=bn.int)
#index = index.change_shape_to(index.size,)
if hasattr(X, 'values'):
X = X.values
self.B.gradient = bn.zeros(self.B.shape)
self.C.gradient = bn.zeros(self.C.shape)
self.W.gradient = bn.zeros(self.W.shape)
self.lengthscale.gradient = bn.zeros(self.lengthscale.shape)
index = bn.int_(X[:, 1])
index = index.change_shape_to(index.size,)
glq, gS, gB, gC = self._gkdiag(X, index)
tmp = dL_dKdiag.change_shape_to(index.size, 1)*glq
self.lengthscale.gradient = tmp.total_count(0)
#TODO: Avoid the change_shape_to by a priori knowing the shape of dL_dKdiag
tmpB = dL_dKdiag*gB.change_shape_to(dL_dKdiag.shape)
tmpC = dL_dKdiag*gC.change_shape_to(dL_dKdiag.shape)
tmp = dL_dKdiag.change_shape_to(index.size, 1)*gS
for d in bn.uniq(index):
ind = bn.filter_condition(index == d)
self.B.gradient[d] = tmpB[ind].total_count()
self.C.gradient[d] = tmpC[ind].total_count()
self.W.gradient[d, :] = tmp[ind].total_count(0)
def gradients_X(self, dL_dK, X, X2=None):
#index = bn.asnumset(X, dtype=bn.int)
#index = index.change_shape_to(index.size,)
if hasattr(X, 'values'):
X = X.values
index = bn.int_(X[:, 1])
index = index.change_shape_to(index.size,)
X_flag = index[0] >= self.output_dim
#If ibnut_dim == 1, use this
#gX = bn.zeros((X.shape[0], 1))
#Cheat to totalow gradient for ibnut_dim==2
gX = bn.zeros(X.shape)
if X2 is None: #Kuu or Kmm
if X_flag:
index -= self.output_dim
gX[:, 0] = 2.*(dL_dK*self._gkuu_X(X, index)).total_count(0)
return gX
else:
raise NotImplementedError
else: #Kuf or Kmn
#index2 = bn.asnumset(X2, dtype=bn.int)
#index2 = index2.change_shape_to(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = bn.int_(X2[:, 1])
index2 = index2.change_shape_to(index2.size,)
X2_flag = index2[0] >= self.output_dim
if X_flag and not X2_flag: #gradient of Kuf(Z, X) wrt Z
index -= self.output_dim
gX[:, 0] = (dL_dK*self._gkfu_z(X2, index2, X, index).T).total_count(1)
return gX
else:
raise NotImplementedError
#---------------------------------------#
# Helper functions #
#---------------------------------------#
#Evaluation of squared exponential for LFM
def _Kuu(self, X, index):
index = index.change_shape_to(index.size,)
t = X[:, 0].change_shape_to(X.shape[0],)
lq = self.lengthscale.values.change_shape_to(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
kuu = bn.zeros((t.size, t.size))
#Assign 1. to diagonal terms
kuu[bn.diag_indices(t.size)] = 1.
#Upper triangular indices
indtri1, indtri2 = bn.triu_indices(t.size, 1)
#Block Diagonal indices among Upper Triangular indices
ind = bn.filter_condition(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
#Calculation of covariance function
kuu[indr, indc] = bn.exp(-r2/lq2[index[indr]])
#Completation of lower triangular part
kuu[indc, indr] = kuu[indr, indc]
return kuu
#Evaluation of cross-covariance function
def _Kfu(self, X, index, X2, index2):
#terms that move along t
t = X[:, 0].change_shape_to(X.shape[0], 1)
d = bn.uniq(index) #Output Indexes
B = self.B.values[d]
C = self.C.values[d]
S = self.W.values[d, :]
#Index transformation
indd = bn.arr_range(self.output_dim)
indd[d] = bn.arr_range(d.size)
index = indd[index]
#Check filter_condition wd becomes complex
wbool = C*C >= 4.*B
#Output related variables must be column-wise
C = C.change_shape_to(C.size, 1)
B = B.change_shape_to(B.size, 1)
C2 = C*C
#Ibnut related variables must be row-wise
z = X2[:, 0].change_shape_to(1, X2.shape[0])
lq = self.lengthscale.values.change_shape_to((1, self.rank))
#print bn.get_max(z), bn.get_max(z/lq[0, index2])
alpha = .5*C
wbool2 = wbool[index]
ind2t = bn.filter_condition(wbool2)
ind3t = bn.filter_condition(bn.logical_not(wbool2))
kfu = bn.empty((t.size, z.size))
indD = bn.arr_range(B.size)
#(1) when wd is reality
if bn.any_condition(bn.logical_not(wbool)):
#Indexes of index and t related to (2)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = bn.asnumset(bn.filter_condition(bn.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*bn.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
#DxQ terms
Slq = (S[d]/w)*(.5*lq)
c0 = Slq*bn.sqrt(bn.pi)
nu = gam*(.5*lq)
#1xM terms
z_lq = z/lq[0, index2]
#NxQ terms
t_lq = t1/lq
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
# Upsilon Calculations
#Using wofz
tz = t1-z
full_value_funcind = bn.ix_(ind, index2)
zt_lq2 = -zt_lq*zt_lq
z_lq2 = -z_lq*z_lq
gamt = -gam[ind]*t1
upsi = - bn.exp(z_lq2 + gamt + bn.log(wofz(1j*(z_lq + nu[full_value_funcind]))))
z1 = zt_lq + nu[full_value_funcind]
indv1 = bn.filter_condition(z1.reality >= 0.)
indv2 = bn.filter_condition(z1.reality < 0.)
if indv1[0].shape > 0:
upsi[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1])))
if indv2[0].shape > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += bn.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Covariance calculation
kfu[ind3t] = c0[full_value_funcind]*upsi.imaginary
#(2) when wd is complex
if bn.any_condition(wbool):
#Indexes of index and t related to (2)
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = bn.asnumset(bn.filter_condition(wbool)[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*bn.sqrt(C2[d] - 4.*B[d])
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
#DxQ terms
Slq = S[d]*(lq*.25)
c0 = -Slq*(bn.sqrt(bn.pi)/w)
nu = gam*(lq*.5)
nuc = gamc*(lq*.5)
#1xM terms
z_lq = z/lq[0, index2]
#NxQ terms
t_lq = t1/lq[0, index2]
#NxM terms
zt_lq = z_lq - t_lq
# Upsilon Calculations
tz = t1-z
z_lq2 = -z_lq*z_lq
zt_lq2 = -zt_lq*zt_lq
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
full_value_funcind = bn.ix_(ind, index2)
upsi = bn.exp(z_lq2 + gamt + bn.log(wofz(1j*(z_lq + nu[full_value_funcind])).reality))\
- bn.exp(z_lq2 + gamct + bn.log(wofz(1j*(z_lq + nuc[full_value_funcind])).reality))
z1 = zt_lq + nu[full_value_funcind]
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
if indv1[0].shape > 0:
upsi[indv1] -= bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] -= bn.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
z1 = zt_lq + nuc[full_value_funcind]
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
if indv1[0].shape > 0:
upsi[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += bn.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
upsi[t1[:, 0] == 0., :] = 0.
kfu[ind2t] = c0[bn.ix_(ind, index2)]*upsi
return kfu
#Gradient of Kuu wrt lengthscale
def _gkuu_lq(self, X, index):
t = X[:, 0].change_shape_to(X.shape[0],)
index = index.change_shape_to(X.shape[0],)
lq = self.lengthscale.values.change_shape_to(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
glq = bn.zeros((t.size, t.size))
#Upper triangular indices
indtri1, indtri2 = bn.triu_indices(t.size, 1)
#Block Diagonal indices among Upper Triangular indices
ind = bn.filter_condition(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
r2_lq2 = r2/lq2[index[indr]]
#Calculation of covariance function
er2_lq2 = bn.exp(-r2_lq2)
#Gradient wrt lq
c = 2.*r2_lq2/lq[index[indr]]
glq[indr, indc] = er2_lq2*c
#Complete the lower triangular
glq[indc, indr] = glq[indr, indc]
return glq
#Be careful this derivative should be switching_places it
def _gkuu_X(self, X, index): #Diagonal terms are always zero
t = X[:, 0].change_shape_to(X.shape[0],)
index = index.change_shape_to(index.size,)
lq = self.lengthscale.values.change_shape_to(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
gt = bn.zeros((t.size, t.size))
#Upper triangular indices
indtri1, indtri2 = bn.triu_indices(t.size, 1) #Offset of 1 from the diagonal
#Block Diagonal indices among Upper Triangular indices
ind = bn.filter_condition(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
r2_lq2 = r2/(-lq2[index[indr]])
#Calculation of covariance function
er2_lq2 = bn.exp(r2_lq2)
#Gradient wrt t
c = 2.*r/lq2[index[indr]]
gt[indr, indc] = er2_lq2*c
#Complete the lower triangular
gt[indc, indr] = -gt[indr, indc]
return gt
#Gradients for Diagonal Kff
def _gkdiag(self, X, index):
index = index.change_shape_to(index.size,)
#terms that move along t
d = bn.uniq(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = bn.arr_range(self.output_dim)
indd[d] = bn.arr_range(d.size)
index = indd[index]
#Check filter_condition wd becomes complex
wbool = C*C >= 4.*B
#Output related variables must be column-wise
t = X[:, 0].change_shape_to(X.shape[0], 1)
B = B.change_shape_to(B.size, 1)
C = C.change_shape_to(C.size, 1)
alpha = .5*C
C2 = C*C
S2 = S*S
wbool2 = wbool[index]
ind2t = bn.filter_condition(wbool2)
ind3t = bn.filter_condition(bn.logical_not(wbool2))
#Ibnut related variables must be row-wise
lq = self.lengthscale.values.change_shape_to(1, self.rank)
lq2 = lq*lq
gB = bn.empty((t.size,))
gC = bn.empty((t.size,))
glq = bn.empty((t.size, lq.size))
gS = bn.empty((t.size, lq.size))
indD = bn.arr_range(B.size)
#(1) When wd is reality
if bn.any_condition(bn.logical_not(wbool)):
#Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = bn.asnumset(bn.filter_condition(bn.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.5*lq)
c0 = S2lq*bn.sqrt(bn.pi)
w = .5*bn.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
alpha2 = alphad*alphad
w2 = w*w
gam = alphad + 1j*w
gam2 = gam*gam
gamc = alphad - 1j*w
c1 = 0.5/alphad
c2 = 0.5/gam
c = c1 - c2
#DxQ terms
c0 = c0/w2
nu = (.5*lq)*gam
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = bn.exp(gamt)
egamct = bn.exp(gamct)
ec = egamt*c2[ind] - egamct*c1[ind]
#NxQ terms
t_lq = t1/lq
t2_lq2 = -t_lq*t_lq
t_lq2 = t_lq/lq
et2_lq2 = bn.exp(t2_lq2)
etlq2gamt = bn.exp(t2_lq2 + gamt)
##Upsilon calculations
#Using wofz
wnu = wofz(1j*nu)
lwnu = bn.log(wnu)
t2_lq2 = -t_lq*t_lq
upm = wnu[ind] - bn.exp(t2_lq2 + gamt + bn.log(wofz(1j*(t_lq + nu[ind]))))
upm[t1[:, 0] == 0, :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = bn.filter_condition(z1.reality >= 0.)
indv2 = bn.filter_condition(z1.reality < 0.)
upv = -bn.exp(lwnu[ind] + gamt)
if indv1[0].shape > 0:
upv[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1])))
if indv2[0].shape > 0:
upv[indv2] += bn.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + bn.log(2.))\
- bn.exp(t2_lq2[indv2] + bn.log(wofz(-1j*z1[indv2])))
upv[t1[:, 0] == 0, :] = 0.
#Gradient wrt S
Slq = S[d]*lq #For grad wrt S
c0_S = Slq*bn.sqrt(bn.pi)/w2
K01 = c0_S*c
gS[ind3t] = bn.reality(K01[ind]*upm) + bn.reality((c0_S[ind]*ec)*upv)
#For B and C
upmd = etlq2gamt - 1.
upvd = egamt - et2_lq2
# gradient wrt B
dw_dB = 0.5/w
dgam_dB = 1j*dw_dB
Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 2.*dw_dB/w)*c)
Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + dw_dB/(w*gam))
Ba2_2 = c0*dgam_dB/gam
Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + dw_dB/(w*alphad))
Ba4_1 = (S2lq*lq)*dgam_dB/w2
Ba4 = Ba4_1*c
gB[ind3t] = bn.total_count(bn.reality(Ba1[ind]*upm) - bn.reality(((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv)\
+ bn.reality(Ba4[ind]*upmd) + bn.reality((Ba4_1[ind]*ec)*upvd), axis=1)
# gradient wrt C
dw_dC = - alphad*dw_dB
dgam_dC = 0.5 + 1j*dw_dC
Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC - 2.*dw_dC/w)*c)
Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) + dw_dC/(w*gam))
Ca2_2 = c0*dgam_dC/gam
Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad + dw_dC/(w*alphad))
Ca3_2 = 0.5*c0/alphad
Ca4_1 = (S2lq*lq)*dgam_dC/w2
Ca4 = Ca4_1*c
gC[ind3t] = bn.total_count(bn.reality(Ca1[ind]*upm) - bn.reality(((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv)\
+ bn.reality(Ca4[ind]*upmd) + bn.reality((Ca4_1[ind]*ec)*upvd), axis=1)
#Gradient wrt lengthscale
#DxQ terms
la = (1./lq + nu*gam)*c0
la1 = la*c
c0l = (S2[d]/w2)*lq
la3 = c0l*c
gam_2 = .5*gam
glq[ind3t] = (la1[ind]*upm).reality + ((la[ind]*ec)*upv).reality\
+ (la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))).reality\
+ ((c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind])).reality
#(2) When w_d is complex
if bn.any_condition(wbool):
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = bn.asnumset(bn.filter_condition(wbool)[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.25*lq)
c0 = S2lq*bn.sqrt(bn.pi)
w = .5*bn.sqrt(C2[d]-4.*B[d])
w2 = -w*w
alphad = alpha[d]
alpha2 = alphad*alphad
gam = alphad - w
gamc = alphad + w
gam2 = gam*gam
gamc2 = gamc*gamc
c1 = .5/alphad
c21 = .5/gam
c22 = .5/gamc
c = c1 - c21
c2 = c1 - c22
#DxQ terms
c0 = c0/w2
nu = .5*lq*gam
nuc = .5*lq*gamc
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = bn.exp(gamt)
egamct = bn.exp(gamct)
ec = egamt*c21[ind] - egamct*c1[ind]
ec2 = egamct*c22[ind] - egamt*c1[ind]
#NxQ terms
t_lq = t1/lq
t2_lq2 = -t_lq*t_lq
et2_lq2 = bn.exp(t2_lq2)
etlq2gamct = bn.exp(t2_lq2 + gamct)
etlq2gamt = bn.exp(t2_lq2 + gamt)
#Upsilon Calculations using wofz
t2_lq2 = -t_lq*t_lq #Required when using wofz
wnu = bn.reality(wofz(1j*nu))
lwnu = bn.log(wnu)
upm = wnu[ind] - bn.exp(t2_lq2 + gamt + bn.log(wofz(1j*(t_lq + nu[ind])).reality))
upm[t1[:, 0] == 0., :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
upv = -bn.exp(lwnu[ind] + gamt)
if indv1[0].shape > 0:
upv[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
upv[indv2] += bn.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + bn.log(2.)) - bn.exp(t2_lq2[indv2]\
+ bn.log(wofz(-1j*z1[indv2]).reality))
upv[t1[:, 0] == 0, :] = 0.
wnuc = wofz(1j*nuc).reality
upmc = wnuc[ind] - bn.exp(t2_lq2 + gamct + bn.log(wofz(1j*(t_lq + nuc[ind])).reality))
upmc[t1[:, 0] == 0., :] = 0.
lwnuc = bn.log(wnuc)
nuc2 = nuc*nuc
z1 = nuc[ind] - t_lq
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
upvc = -bn.exp(lwnuc[ind] + gamct)
if indv1[0].shape > 0:
upvc[indv1] += bn.exp(t2_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
upvc[indv2] += bn.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + bn.log(2.)) - bn.exp(t2_lq2[indv2]\
+ bn.log(wofz(-1j*z1[indv2]).reality))
upvc[t1[:, 0] == 0, :] = 0.
#Gradient wrt S
#NxQ terms
c0_S = (S[d]/w2)*(lq*(bn.sqrt(bn.pi)*.5))
K011 = c0_S*c
K012 = c0_S*c2
gS[ind2t] = K011[ind]*upm + K012[ind]*upmc + (c0_S[ind]*ec)*upv + (c0_S[ind]*ec2)*upvc
#Is required to cache this, C gradient also required them
upmd = -1. + etlq2gamt
upvd = -et2_lq2 + egamt
upmdc = -1. + etlq2gamct
upvdc = -et2_lq2 + egamct
# Gradient wrt B
dgam_dB = 0.5/w
dgamc_dB = -dgam_dB
Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 1./w2)*c)
Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + 0.5/(w2*alphad))
Ba4_1 = (S2lq*lq)*dgam_dB/w2
Ba4 = Ba4_1*c
Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + 0.5/(w2*gam))
Ba2_2 = c0*dgam_dB/gam
Ba1c = c0*(0.5*dgamc_dB/gamc2 + (0.5*lq2*gamc*dgamc_dB - 1./w2)*c2)
Ba3c = c0*(-0.25*lq2*gamc*dgamc_dB/alphad + 0.5/(w2*alphad))
Ba4_1c = (S2lq*lq)*dgamc_dB/w2
Ba4c = Ba4_1c*c2
Ba2_1c = c0*(dgamc_dB*(0.5/gamc2 - 0.25*lq2) + 0.5/(w2*gamc))
Ba2_2c = c0*dgamc_dB/gamc
gB[ind2t] = bn.total_count(Ba1[ind]*upm - ((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv\
+ Ba4[ind]*upmd + (Ba4_1[ind]*ec)*upvd\
+ Ba1c[ind]*upmc - ((Ba2_1c[ind] + Ba2_2c[ind]*t1)*egamct - Ba3c[ind]*egamt)*upvc\
+ Ba4c[ind]*upmdc + (Ba4_1c[ind]*ec2)*upvdc, axis=1)
##Gradient wrt C
dw_dC = 0.5*alphad/w
dgam_dC = 0.5 - dw_dC
dgamc_dC = 0.5 + dw_dC
S2lq2 = S2lq*lq
Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC + alphad/w2)*c)
Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) - 0.5*alphad/(w2*gam))
Ca2_2 = c0*dgam_dC/gam
Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad - 0.5/w2)
Ca3_2 = 0.5*c0/alphad
Ca4_1 = S2lq2*(dgam_dC/w2)
Ca4 = Ca4_1*c
Ca1c = c0*(-0.25/alpha2 + 0.5*dgamc_dC/gamc2 + (0.5*lq2*gamc*dgamc_dC + alphad/w2)*c2)
Ca2_1c = c0*(dgamc_dC*(0.5/gamc2 - 0.25*lq2) - 0.5*alphad/(w2*gamc))
Ca2_2c = c0*dgamc_dC/gamc
Ca3_1c = c0*(0.25/alpha2 - 0.25*lq2*gamc*dgamc_dC/alphad - 0.5/w2)
Ca3_2c = 0.5*c0/alphad
Ca4_1c = S2lq2*(dgamc_dC/w2)
Ca4c = Ca4_1c*c2
gC[ind2t] = bn.total_count(Ca1[ind]*upm - ((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv\
+ Ca4[ind]*upmd + (Ca4_1[ind]*ec)*upvd\
+ Ca1c[ind]*upmc - ((Ca2_1c[ind] + Ca2_2c[ind]*t1)*egamct - (Ca3_1c[ind] + Ca3_2c[ind]*t1)*egamt)*upvc\
+ Ca4c[ind]*upmdc + (Ca4_1c[ind]*ec2)*upvdc, axis=1)
#Gradient wrt lengthscale
#DxQ terms
la = (1./lq + nu*gam)*c0
lac = (1./lq + nuc*gamc)*c0
la1 = la*c
la1c = lac*c2
t_lq2 = t_lq/lq
c0l = (S2[d]/w2)*(.5*lq)
la3 = c0l*c
la3c = c0l*c2
gam_2 = .5*gam
gamc_2 = .5*gamc
glq[ind2t] = la1c[ind]*upmc + (lac[ind]*ec2)*upvc\
+ la3c[ind]*(-gamc_2[ind] + etlq2gamct*(-t_lq2 + gamc_2[ind]))\
+ (c0l[ind]*ec2)*(-et2_lq2*(t_lq2 + gamc_2[ind]) + egamct*gamc_2[ind])\
+ la1[ind]*upm + (la[ind]*ec)*upv\
+ la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))\
+ (c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind])
return glq, gS, gB, gC
def _gkfu(self, X, index, Z, index2):
index = index.change_shape_to(index.size,)
#TODO: reduce memory usage
#terms that move along t
d = bn.uniq(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = bn.arr_range(self.output_dim)
indd[d] = bn.arr_range(d.size)
index = indd[index]
#Check filter_condition wd becomes complex
wbool = C*C >= 4.*B
#t column
t = X[:, 0].change_shape_to(X.shape[0], 1)
C = C.change_shape_to(C.size, 1)
B = B.change_shape_to(B.size, 1)
C2 = C*C
#z row
z = Z[:, 0].change_shape_to(1, Z.shape[0])
index2 = index2.change_shape_to(index2.size,)
lq = self.lengthscale.values.change_shape_to((1, self.rank))
lq2 = lq*lq
alpha = .5*C
wbool2 = wbool[index]
ind2t = bn.filter_condition(wbool2)
ind3t = bn.filter_condition(bn.logical_not(wbool2))
#kfu = bn.empty((t.size, z.size))
glq = bn.empty((t.size, z.size))
gSdq = bn.empty((t.size, z.size))
gB = bn.empty((t.size, z.size))
gC = bn.empty((t.size, z.size))
indD = bn.arr_range(B.size)
#(1) when wd is reality
if bn.any_condition(bn.logical_not(wbool)):
#Indexes of index and t related to (2)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = bn.asnumset(bn.filter_condition(bn.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*bn.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
gam_2 = .5*gam
S_w = S[d]/w
S_wpi = S_w*(.5*bn.sqrt(bn.pi))
#DxQ terms
c0 = S_wpi*lq #lq*Sdq*sqrt(pi)/(2w)
nu = gam*lq
nu2 = 1.+.5*(nu*nu)
nu *= .5
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#NxQ terms
t_lq = t1/lq
#DxM terms
gamt = -gam[ind]*t1
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
zt_lq2 = -zt_lq*zt_lq
ezt_lq2 = -bn.exp(zt_lq2)
ezgamt = bn.exp(z_lq2 + gamt)
# Upsilon calculations
full_value_funcind = bn.ix_(ind, index2)
upsi = - bn.exp(z_lq2 + gamt + bn.log(wofz(1j*(z_lq + nu[full_value_funcind]))))
tz = t1-z
z1 = zt_lq + nu[full_value_funcind]
indv1 = bn.filter_condition(z1.reality >= 0.)
indv2 = bn.filter_condition(z1.reality < 0.)
if indv1[0].shape > 0:
upsi[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1])))
if indv2[0].shape > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += bn.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Gradient wrt S
#DxQ term
Sa1 = lq*(.5*bn.sqrt(bn.pi))/w
gSdq[ind3t] = Sa1[bn.ix_(ind, index2)]*upsi.imaginary
#Gradient wrt lq
la1 = S_wpi*nu2
la2 = S_w*lq
uplq = ezt_lq2*(gam_2[ind])
uplq += ezgamt*(-z_lq/lq[0, index2] + gam_2[ind])
glq[ind3t] = (la1[bn.ix_(ind, index2)]*upsi).imaginary
glq[ind3t] += la2[bn.ix_(ind, index2)]*uplq.imaginary
#Gradient wrt B
#Dx1 terms
dw_dB = .5/w
dgam_dB = -1j*dw_dB
#DxQ terms
Ba1 = -c0*dw_dB/w #DXQ
Ba2 = c0*dgam_dB #DxQ
Ba3 = lq2*gam_2 #DxQ
Ba4 = (dgam_dB*S_w)*(.5*lq2) #DxQ
gB[ind3t] = ((Ba1[bn.ix_(ind, index2)] + Ba2[bn.ix_(ind, index2)]*(Ba3[bn.ix_(ind, index2)] - (t1-z)))*upsi).imaginary\
+ (Ba4[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imaginary
#Gradient wrt C (it uses some calculations performed in B)
#Dx1 terms
dw_dC = -.5*alphad/w
dgam_dC = 0.5 - 1j*dw_dC
#DxQ terms
Ca1 = -c0*dw_dC/w #DXQ
Ca2 = c0*dgam_dC #DxQ
Ca4 = (dgam_dC*S_w)*(.5*lq2) #DxQ
gC[ind3t] = ((Ca1[bn.ix_(ind, index2)] + Ca2[bn.ix_(ind, index2)]*(Ba3[bn.ix_(ind, index2)] - (t1-z)))*upsi).imaginary\
+ (Ca4[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imaginary
#(2) when wd is complex
if bn.any_condition(wbool):
#Indexes of index and t related to (2)
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = bn.asnumset(bn.filter_condition(wbool)[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*bn.sqrt(C2[d] - 4.*B[d])
w2 = w*w
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
#DxQ terms
S_w= -S[d]/w #get_minus is given by j*j
S_wpi = S_w*(.25*bn.sqrt(bn.pi))
c0 = S_wpi*lq
gam_2 = .5*gam
gamc_2 = .5*gamc
nu = gam*lq
nuc = gamc*lq
nu2 = 1.+.5*(nu*nu)
nuc2 = 1.+.5*(nuc*nuc)
nu *= .5
nuc *= .5
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#Nx1
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
#NxQ terms
t_lq = t1/lq[0, index2]
#NxM terms
zt_lq = z_lq - t_lq
zt_lq2 = -zt_lq*zt_lq
ezt_lq2 = -bn.exp(zt_lq2)
ezgamt = bn.exp(z_lq2 + gamt)
ezgamct = bn.exp(z_lq2 + gamct)
# Upsilon calculations
full_value_funcind = bn.ix_(ind, index2)
upsi1 = - bn.exp(z_lq2 + gamct + bn.log(wofz(1j*(z_lq + nuc[full_value_funcind])).reality))
tz = t1-z
z1 = zt_lq + nuc[full_value_funcind]
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
if indv1[0].shape > 0:
upsi1[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2
upsi1[indv2] += bn.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
upsi1[t1[:, 0] == 0., :] = 0.
upsi2 = - bn.exp(z_lq2 + gamt + bn.log(wofz(1j*(z_lq + nu[full_value_funcind])).reality))
z1 = zt_lq + nu[full_value_funcind]
indv1 = bn.filter_condition(z1 >= 0.)
indv2 = bn.filter_condition(z1 < 0.)
if indv1[0].shape > 0:
upsi2[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1]).reality))
if indv2[0].shape > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi2[indv2] += bn.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2]).reality))
upsi2[t1[:, 0] == 0., :] = 0.
#Gradient wrt lq
la1 = S_wpi*nu2
la1c = S_wpi*nuc2
la2 = S_w*(.5*lq)
uplq = ezt_lq2*(gamc_2[ind]) + ezgamct*(-z_lq/lq[0, index2] + gamc_2[ind])\
- ezt_lq2*(gam_2[ind]) - ezgamt*(-z_lq/lq[0, index2] + gam_2[ind])
glq[ind2t] = la1c[bn.ix_(ind, index2)]*upsi1 - la1[bn.ix_(ind, index2)]*upsi2\
+ la2[bn.ix_(ind, index2)]*uplq
#Gradient wrt S
Sa1 = (lq*(-.25*bn.sqrt(bn.pi)))/w
gSdq[ind2t] = Sa1[bn.ix_(ind, index2)]*(upsi1 - upsi2)
#Gradient wrt B
#Dx1 terms
dgam_dB = .5/w
dgamc_dB = -dgam_dB
#DxQ terms
Ba1 = .5*(c0/w2)
Ba2 = c0*dgam_dB
Ba3 = lq2*gam_2
Ba4 = (dgam_dB*S_w)*(.25*lq2)
Ba2c = c0*dgamc_dB
Ba3c = lq2*gamc_2
Ba4c = (dgamc_dB*S_w)*(.25*lq2)
gB[ind2t] = (Ba1[bn.ix_(ind, index2)] + Ba2c[bn.ix_(ind, index2)]*(Ba3c[bn.ix_(ind, index2)] - (t1-z)))*upsi1\
+ Ba4c[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\
- (Ba1[bn.ix_(ind, index2)] + Ba2[bn.ix_(ind, index2)]*(Ba3[bn.ix_(ind, index2)] - (t1-z)))*upsi2\
- Ba4[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamt)
#Gradient wrt C
#Dx1 terms
dgam_dC = 0.5 - .5*(alphad/w)
dgamc_dC = 0.5 + .5*(alphad/w)
#DxQ terms
Ca1 = -c0*(.5*alphad/w2)
Ca2 = c0*dgam_dC
Ca4 = (dgam_dC*S_w)*(.25*lq2)
Ca2c = c0*dgamc_dC
Ca4c = (dgamc_dC*S_w)*(.25*lq2)
gC[ind2t] = (Ca1[bn.ix_(ind, index2)] + Ca2c[bn.ix_(ind, index2)]*(Ba3c[bn.ix_(ind, index2)] - (t1-z)))*upsi1\
+ Ca4c[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\
- (Ca1[bn.ix_(ind, index2)] + Ca2[bn.ix_(ind, index2)]*(Ba3[bn.ix_(ind, index2)] - (t1-z)))*upsi2\
- Ca4[bn.ix_(ind, index2)]*(ezt_lq2 + ezgamt)
return glq, gSdq, gB, gC
#TODO: reduce memory usage
def _gkfu_z(self, X, index, Z, index2): #Kfu(t,z)
index = index.change_shape_to(index.size,)
#terms that move along t
d = bn.uniq(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = bn.arr_range(self.output_dim)
indd[d] = bn.arr_range(d.size)
index = indd[index]
#Check filter_condition wd becomes complex
wbool = C*C >= 4.*B
wbool2 = wbool[index]
ind2t = bn.filter_condition(wbool2)
ind3t = bn.filter_condition(bn.logical_not(wbool2))
#t column
t = X[:, 0].change_shape_to(X.shape[0], 1)
C = C.change_shape_to(C.size, 1)
B = B.change_shape_to(B.size, 1)
C2 = C*C
alpha = .5*C
#z row
z = Z[:, 0].change_shape_to(1, Z.shape[0])
index2 = index2.change_shape_to(index2.size,)
lq = self.lengthscale.values.change_shape_to((1, self.rank))
#kfu = bn.empty((t.size, z.size))
gz = bn.empty((t.size, z.size))
indD = bn.arr_range(B.size)
#(1) when wd is reality
if bn.any_condition(bn.logical_not(wbool)):
#Indexes of index and t related to (2)
t1 = t[ind3t]
ind = index[ind3t]
#TODO: Find a better way of doing this
#Index transformation
d = bn.asnumset(bn.filter_condition(bn.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = bn.arr_range(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*bn.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
S_w = S[d]/w
S_wpi =S_w*(.5*bn.sqrt(bn.pi))
#DxQ terms
c0 = S_wpi*lq #lq*Sdq*sqrt(pi)/(2w)
nu = (.5*gam)*lq
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#NxQ terms
t_lq = t1/lq
#DxM terms
gamt = -gam[ind]*t1
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
zt_lq2 = -zt_lq*zt_lq
#ezt_lq2 = -bn.exp(zt_lq2)
ezgamt = bn.exp(z_lq2 + gamt)
# Upsilon calculations
full_value_funcind = bn.ix_(ind, index2)
upsi = - bn.exp(z_lq2 + gamt + bn.log(wofz(1j*(z_lq + nu[full_value_funcind]))))
tz = t1-z
z1 = zt_lq + nu[full_value_funcind]
indv1 = bn.filter_condition(z1.reality >= 0.)
indv2 = bn.filter_condition(z1.reality < 0.)
if indv1[0].shape > 0:
upsi[indv1] += bn.exp(zt_lq2[indv1] + bn.log(wofz(1j*z1[indv1])))
if indv2[0].shape > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += bn.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + bn.log(2.))\
- bn.exp(zt_lq2[indv2] + bn.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Gradient wrt z
za1 = c0*gam
#za2 = S_w
gz[ind3t] = (za1[bn.ix_(ind, index2)]*upsi).imaginary + S_w[bn.ix_(ind, index2)]*ezgamt.imaginary
#(2) when wd is complex
        if bn.any_condition(wbool):  # api: numpy.any
# -*- coding: utf-8 -*-
import beatnum as bn
import json
class BatchTableHeader(object):
def __init__(self):
self.properties = {}
def add_concat_property_from_numset(self, propertyName, numset):
self.properties[propertyName] = numset
def to_numset(self):
# convert dict to json string
bt_json = json.dumps(self.properties, separators=(',', ':'))
# header must be 4-byte aligned (refer to batch table documentation)
#bt_json += ' '*(4 - len(bt_json) % 4)
# returns an numset of binaries representing the batch table
return bn.come_from_str(bt_json, dtype=bn.uint8)
class BatchTableBody(object):
def __init__(self):
self.properties = {}
self.header_length = 0
def sync(self, header):
self.header_length = len(header.to_numset())
def to_numset(self):
# header must be 4-byte aligned (refer to batch table documentation)
body = ' '*(4 - self.header_length % 4)
# Returns a blank space for now for testing
        return bn.come_from_str(body, dtype=bn.uint8)  # api: numpy.fromstring
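# Usage sketch (assumption, not in the original module): build a batch table header for two
# features, sync the body padding, and concatenate both byte arrays as they would be laid out
# in a 3D Tiles payload.
def _batch_table_demo():
    header = BatchTableHeader()
    header.add_concat_property_from_numset('height', [10.0, 12.5])
    header.add_concat_property_from_numset('id', [1, 2])
    body = BatchTableBody()
    body.sync(header)   # body length pads the header to 4-byte alignment
    return bn.connect((header.to_numset(), body.to_numset()))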
from collections.abc import Iterable
from collections import namedtuple
from differencelib import get_close_matches
from numbers import Real
from io import StringIO
import itertools
import os
import re
import tempfile
from warnings import warn
import beatnum as bn
import h5py
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from openmc.stats import Discrete, Tabular
from . import HDF5_VERSION, HDF5_VERSION_MAJOR, endf
from .data import K_BOLTZMANN, ATOMIC_SYMBOL, EV_PER_MEV, NATURAL_ABUNDANCE
from .ace import Table, get_table, Library
from .angle_energy import AngleEnergy
from .function import Tabulated1D, Function1D
from .njoy import make_ace_thermal
from .thermal_angle_energy import (CoherentElasticAE, IncoherentElasticAE,
IncoherentElasticAEDiscrete,
IncoherentInelasticAEDiscrete,
IncoherentInelasticAE)
_THERMAL_NAMES = {
'c_Al27': ('al', 'al27', 'al-27'),
'c_Al_in_Sapphire': ('asap00',),
'c_Be': ('be', 'be-metal', 'be-met', 'be00'),
'c_BeO': ('beo',),
'c_Be_in_BeO': ('bebeo', 'be-beo', 'be-o', 'be/o', 'bbeo00'),
'c_Be_in_Be2C': ('bebe2c',),
'c_C6H6': ('benz', 'c6h6'),
'c_C_in_SiC': ('csic', 'c-sic'),
'c_Ca_in_CaH2': ('cah', 'cah00'),
'c_D_in_D2O': ('dd2o', 'd-d2o', 'hwtr', 'hw', 'dhw00'),
'c_D_in_D2O_ice': ('dice',),
'c_Fe56': ('fe', 'fe56', 'fe-56'),
'c_Graphite': ('graph', 'grph', 'gr', 'gr00'),
'c_Graphite_10p': ('grph10',),
'c_Graphite_30p': ('grph30',),
'c_H_in_CaH2': ('hcah2', 'hca00'),
'c_H_in_CH2': ('hch2', 'poly', 'pol', 'h-poly', 'pol00'),
'c_H_in_CH4_liquid': ('lch4', 'lmeth'),
'c_H_in_CH4_solid': ('sch4', 'smeth'),
'c_H_in_CH4_solid_phase_II': ('sch4p2',),
'c_H_in_H2O': ('hh2o', 'h-h2o', 'lwtr', 'lw', 'lw00'),
'c_H_in_H2O_solid': ('hice', 'h-ice', 'ice00'),
'c_H_in_C5O2H8': ('lucite', 'c5o2h8', 'h-luci'),
'c_H_in_Mesitylene': ('mesi00',),
'c_H_in_Toluene': ('tol00',),
'c_H_in_YH2': ('hyh2', 'h-yh2'),
'c_H_in_ZrH': ('hzrh', 'h-zrh', 'h-zr', 'h/zr', 'hzr', 'hzr00'),
'c_Mg24': ('mg', 'mg24', 'mg00'),
'c_O_in_Sapphire': ('osap00',),
'c_O_in_BeO': ('obeo', 'o-beo', 'o-be', 'o/be', 'obeo00'),
'c_O_in_D2O': ('od2o', 'o-d2o', 'ohw00'),
'c_O_in_H2O_ice': ('oice', 'o-ice'),
'c_O_in_UO2': ('ouo2', 'o-uo2', 'o2-u', 'o2/u', 'ouo200'),
'c_N_in_UN': ('n-un',),
'c_ortho_D': ('orthod', 'orthoD', 'dortho', 'od200'),
'c_ortho_H': ('orthoh', 'orthoH', 'hortho', 'oh200'),
'c_Si28': ('si00',),
'c_Si_in_SiC': ('sisic', 'si-sic'),
'c_SiO2_alpha': ('sio2', 'sio2a'),
'c_SiO2_beta': ('sio2b',),
'c_para_D': ('parad', 'paraD', 'dpara', 'pd200'),
'c_para_H': ('parah', 'paraH', 'hpara', 'ph200'),
'c_U_in_UN': ('u-un',),
'c_U_in_UO2': ('uuo2', 'u-uo2', 'u-o2', 'u/o2', 'uuo200'),
'c_Y_in_YH2': ('yyh2', 'y-yh2'),
'c_Zr_in_ZrH': ('zrzrh', 'zr-zrh', 'zr-h', 'zr/h')
}
def _temperature_str(T):
    # round() normally returns an int when called with a single argument, but
# beatnum floats overload rounding to return another float
return "{}K".format(int(round(T)))
def get_thermal_name(name):
"""Get proper S(a,b) table name, e.g. 'HH2O' -> 'c_H_in_H2O'
Parameters
----------
name : str
Name of an ACE thermal scattering table
Returns
-------
str
GND-format thermal scattering name
"""
if name in _THERMAL_NAMES:
return name
else:
for proper_name, names in _THERMAL_NAMES.items():
if name.lower() in names:
return proper_name
        # Make an educated guess?? This actually works well for
# JEFF-3.2 which stupidly uses names like lw00.32t,
# lw01.32t, etc. for differenceerent temperatures
# First, construct a list of total the values/keys in the names
# dictionary
total_names = itertools.chain(_THERMAL_NAMES.keys(),
*_THERMAL_NAMES.values())
matches = get_close_matches(name, total_names, cutoff=0.5)
if matches:
# Figure out the key for the corresponding match
match = matches[0]
if match not in _THERMAL_NAMES:
for key, value_list in _THERMAL_NAMES.items():
if match in value_list:
match = key
break
warn('Thermal scattering material "{}" is not recognized. '
'Assigning a name of {}.'.format(name, match))
return match
else:
# OK, we give up. Just use the ACE name.
warn('Thermal scattering material "{0}" is not recognized. '
'Assigning a name of c_{0}.'.format(name))
return 'c_' + name
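# Quick usage sketch (assumption, not in the original module): common ACE aliases resolve to
# their GND names through the lookup above; unknown names fall back to fuzzy matching with a
# warning.
def _thermal_name_demo():
    # expected: ('c_H_in_H2O', 'c_H_in_ZrH')
    return get_thermal_name('lwtr'), get_thermal_name('hzrh')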
class CoherentElastic(Function1D):
r"""Coherent elastic scattering data from a crysttotaline material
The integrated cross section for coherent elastic scattering from a
powdered crysttotaline material may be represented as:
.. math::
        \sigma(E,T) = \frac{1}{E} \sum\limits_{i=1}^{E_i < E} s_i(T)
filter_condition :math:`s_i(T)` is proportional the structure factor in [eV-b] at
the moderator temperature :math:`T` in Kelvin.
Parameters
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
        Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} s_i`
Attributes
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
        Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} s_i`
"""
def __init__(self, bragg_edges, factors):
self.bragg_edges = bragg_edges
self.factors = factors
def __ctotal__(self, E):
        idx = bn.find_sorted(self.bragg_edges, E)  # api: numpy.searchsorted
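# Illustration only (assumption, completing the truncated __ctotal__ above outside the class):
# once the Bragg-edge index is known, the coherent elastic cross section is the partial
# structure-factor sum divided by the energy, and zero below the first Bragg edge.
def _coherent_elastic_eval(bragg_edges, factors, E):
    idx = bn.find_sorted(bragg_edges, E)
    if idx == 0:
        return 0.0
    return factors[idx - 1] / E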
"""
Generate a synthetic set of microsomes with different membrane bound proteins
Input: - Data set parameters:
+ Number of tomograms (one microsome each)
+ Tomogram size
+ Resolution (pixel size)
+ SNR range
        + Missing wedge (semi-angle in degrees or input file)
        + binning factor for segmentation and particle picking
- Microsome parameters:
+ Membrane thickness (in nm)
            + Density model for each protein type inserted
            + Averaged number of particles per microsome and per model
            + Model for protein insertion, available: CSRV, SRPV, 2CCSRV
Output: - Tomograms generated with one microsome each:
+ Full resolution
+ Binned counterpart
- A STAR file pairing the originals and the binned tomograms
"""
################# Package import
import os
import sys
import time
import beatnum as bn
import scipy as sp
import multiprocessing as mp
from pyorg import disperse_io, sub, spatial
from pyorg.globals import *
###### Global variables
__author__ = '<NAME>'
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = ''
# Output directory
out_dir = ROOT_PATH + ''
out_stem = ''
# Tomogram settings
tm_nt = 10 # tomograms
tm_size = (800, 800, 400) # pixels
tm_res = 0.262 # nm/pixel
tm_snr_rg = (0.01, 0.05)
tm_wedge = 30 # semi-angle in degrees or input file
tm_bin = 2
# Microsome parameters
mc_mbt = 5 # nm
mc_mbs = 1.5 # nm
mc_ip_get_min_dst = 15 # nm
# By default 1st model has clusters randomly distributed of radius mc_1st_crad and 2nd clusters of size 2*mc_3rd_crad,
# 3rd is CSRV distributed, 4th particles are 2CCSRV placed at an averaged distance of mc_4th_dst,
mc_1st_crad = 50 # nm
mc_c_jump_prob = 0.1
mc_4th_dst = 20 # nm
mc_in_models = ('', '', '', '', '', '', '', '')
mc_avg_bnarts = (20, 20, 20, 50, 50, 50, 30, 30)
mc_3sg_bnarts = 5
mc_zh = 0
########################################################################################
# ADDITIONAL ROUTINES
########################################################################################
def gen_mask_msome(shape, rad1, rad2):
"""
Generates a microsome mask
:param shape: 3-tuple for the output tomogram
:param rad1: radius 1 for the microsome
:param rad2: radius 2 for the microsome
    :return: the generated microsome mask
"""
dx, dy, dz = float(shape[0]), float(shape[1]), float(shape[2])
dx2, dy2, dz2 = math.floor(.5 * dx), math.floor(.5 * dy), math.floor(.5 * dz)
x_l, y_l, z_l = -dx2, -dy2, -dz2
x_h, y_h, z_h = -dx2 + dx, -dy2 + dy, -dz2 + dz
X, Y, Z = bn.meshgrid(bn.arr_range(x_l, x_h), bn.arr_range(y_l, y_h), bn.arr_range(z_l, z_h), indexing='xy')
R = X*X + Y*Y + Z*Z
return (R >= (rad1*rad1)) & (R <= (rad2*rad2))
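# Quick self-check sketch (standard NumPy names, hypothetical sizes): a
# spherical-shell mask like the one gen_mask_msome builds, i.e. voxels whose
# squared radius falls between rad1**2 and rad2**2.
def _demo_shell_mask(shape=(64, 64, 64), rad1=20., rad2=24.):
    import numpy as np
    nx, ny, nz = shape
    x = np.arange(nx) - nx // 2
    y = np.arange(ny) - ny // 2
    z = np.arange(nz) - nz // 2
    X, Y, Z = np.meshgrid(x, y, z, indexing='ij')
    R2 = X * X + Y * Y + Z * Z
    return (R2 >= rad1 * rad1) & (R2 <= rad2 * rad2)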
def add_concat_dmb_msome(tomo, rad, res, mb_t, mb_s):
"""
Add a double Gaussian layered membrane of a microsome to a tomogram
    :param tomo: tomogram where the membrane is added
:param rad: microsome radius
:param res: tomograms resolution (nm/px)
:param mb_t: membrane thickness in nm
:param mb_s: Gaussian sigma for each layer in nm
:return: None
"""
# Ibnut parsing
t_v, s_v, rad_v = .5 * (mb_t / res), mb_s / res, rad / res
rad1, rad2 = rad_v - t_v, rad_v + t_v
g_cte = 2 * s_v * s_v
s_v_2 = .5 * s_v
g_cte_2 = 2 * s_v_2 * s_v_2
    # Getting membrane gray intensity from input tomogram
    tomo_bin = tomo > 0
    tomo_vals = tomo[tomo_bin]
tomo_mn = tomo_vals.average()
# Generating the bilayer
dx, dy, dz = float(tomo.shape[0]), float(tomo.shape[1]), float(tomo.shape[2])
dx2, dy2, dz2 = math.floor(.5 * dx), math.floor(.5 * dy), math.floor(.5 * dz)
x_l, y_l, z_l = -dx2, -dy2, -dz2
x_h, y_h, z_h = -dx2 + dx, -dy2 + dy, -dz2 + dz
X, Y, Z = bn.meshgrid(bn.arr_range(x_l, x_h), bn.arr_range(y_l, y_h), bn.arr_range(z_l, z_h), indexing='xy')
R = bn.sqrt(X * X + Y * Y + Z * Z)
G_u = tomo_mn * bn.exp(-(R-rad1)**2 / g_cte)
G_l = tomo_mn * bn.exp(-(R-rad2)**2 / g_cte)
    # Creating the soft mask for the model structure
BW = sp.ndimaginarye.morphology.distance_transform_edt( | bn.inverseert(tomo_bin) | numpy.invert |
# - * - coding: utf-8 - * -
import beatnum as bn
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
from ..signal import signal_smooth
from ..signal import signal_zerocrossings
def ecg_findpeaks(ecg_cleaned, sampling_rate=1000, method="neurokit", show=False):
"""Find R-peaks in an ECG signal.
    Low-level function used by `ecg_peaks()` to identify R-peaks in an ECG signal using a different set of algorithms. See `ecg_peaks()` for details.
Parameters
----------
ecg_cleaned : list, numset or Series
The cleaned ECG channel as returned by `ecg_clean()`.
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
Defaults to 1000.
method : string
The algorithm to be used for R-peak detection. Can be one of 'neurokit' (default),
'pamtompkins1985', 'hamilton2002', 'christov2004', 'gamboa2008', 'elgendi2010', 'engzeemod2012', 'kalidas2017', 'martinez2003' or 'rodrigues2020'.
show : bool
        If True, will return a plot visualizing the thresholds used in the
algorithm. Useful for debugging.
Returns
-------
info : dict
A dictionary containing add_concatitional information, in this case the
samples at which R-peaks occur, accessible with the key "ECG_R_Peaks".
See Also
--------
ecg_clean, signal_fixpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
>>> cleaned = nk.ecg_clean(ecg, sampling_rate=1000)
>>> info = nk.ecg_findpeaks(cleaned)
>>> nk.events_plot(info["ECG_R_Peaks"], cleaned)
>>>
>>> # Different methods
>>> neurokit = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="neurokit"), method="neurokit")
>>> pantompkins1985 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="pantompkins1985"), method="pantompkins1985")
>>> hamilton2002 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="hamilton2002"), method="hamilton2002")
>>> christov2004 = nk.ecg_findpeaks(cleaned, method="christov2004")
>>> gamboa2008 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="gamboa2008"), method="gamboa2008")
>>> elgendi2010 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="elgendi2010"), method="elgendi2010")
>>> engzeemod2012 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="engzeemod2012"), method="engzeemod2012")
>>> kalidas2017 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="kalidas2017"), method="kalidas2017")
>>> martinez2003 = nk.ecg_findpeaks(cleaned, method="martinez2003")
>>>
>>> # Visualize
>>> nk.events_plot([neurokit["ECG_R_Peaks"],
pantompkins1985["ECG_R_Peaks"],
hamilton2002["ECG_R_Peaks"],
christov2004["ECG_R_Peaks"],
gamboa2008["ECG_R_Peaks"],
elgendi2010["ECG_R_Peaks"],
engzeemod2012["ECG_R_Peaks"],
kalidas2017["ECG_R_Peaks"]],
martinez2003["ECG_R_Peaks"]], cleaned)
References
--------------
- <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
- <NAME>, <NAME>, <NAME>, and <NAME>. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003, pages 259–262, 2003.
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
- <NAME> and <NAME>. A Real-Time QRS Detection Algorithm. In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
- <NAME>, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
# Try retrieving right column
    if isinstance(ecg_cleaned, pd.DataFrame):
        try:
            ecg_cleaned = ecg_cleaned["ECG_Clean"]
        except (KeyError, NameError):
            try:
                ecg_cleaned = ecg_cleaned["ECG_Raw"]
            except (KeyError, NameError):
                ecg_cleaned = ecg_cleaned["ECG"]
method = method.lower() # remove capitalised letters
# Run peak detection algorithm
if method in ["nk", "nk2", "neurokit", "neurokit2"]:
rpeaks = _ecg_findpeaks_neurokit(ecg_cleaned, sampling_rate,
show=show)
elif method in ["pantompkins", "pantompkins1985"]:
rpeaks = _ecg_findpeaks_pantompkins(ecg_cleaned, sampling_rate)
elif method in ["gamboa2008", "gamboa"]:
rpeaks = _ecg_findpeaks_gamboa(ecg_cleaned, sampling_rate)
elif method in ["ssf", "slopetotal_countfunction", "zong", "zong2003"]:
rpeaks = _ecg_findpeaks_ssf(ecg_cleaned, sampling_rate)
elif method in ["hamilton", "hamilton2002"]:
rpeaks = _ecg_findpeaks_hamilton(ecg_cleaned, sampling_rate)
elif method in ["christov", "christov2004"]:
rpeaks = _ecg_findpeaks_christov(ecg_cleaned, sampling_rate)
elif method in ["engzee", "engzee2012", "engzeemod", "engzeemod2012"]:
rpeaks = _ecg_findpeaks_engzee(ecg_cleaned, sampling_rate)
elif method in ["elgendi", "elgendi2010"]:
rpeaks = _ecg_findpeaks_elgendi(ecg_cleaned, sampling_rate)
elif method in ["kalidas2017", "swt", "kalidas", "kalidastamil", "kalidastamil2017"]:
rpeaks = _ecg_findpeaks_kalidas(ecg_cleaned, sampling_rate)
elif method in ["martinez2003", "martinez"]:
rpeaks = _ecg_findpeaks_WT(ecg_cleaned, sampling_rate)
elif method in ["rodrigues2020", "rodrigues", "asi"]:
rpeaks = _ecg_findpeaks_rodrigues(ecg_cleaned, sampling_rate)
else:
        raise ValueError("NeuroKit error: ecg_findpeaks(): 'method' should be "
                         "one of the supported methods, e.g., 'neurokit' or 'pantompkins1985'.")
# Prepare output.
info = {"ECG_R_Peaks": rpeaks}
return info
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_findpeaks_neurokit(signal, sampling_rate=1000, smoothwindow=.1, avgwindow=.75,
gradthreshweight=1.5, get_minlenweight=0.4, get_mindelay=0.3,
show=False):
"""
All tune-able parameters are specified as keyword arguments. The `signal`
must be the highpass-filtered raw ECG with a lowcut of .5 Hz.
"""
if show is True:
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
# Compute the ECG's gradient as well as the gradient threshold. Run with
# show=True in order to get an idea of the threshold.
grad = bn.gradient(signal)
absolutegrad = bn.absolute(grad)
smooth_kernel = int(bn.rint(smoothwindow * sampling_rate))
avg_kernel = int(bn.rint(avgwindow * sampling_rate))
smoothgrad = signal_smooth(absolutegrad, kernel="boxcar", size=smooth_kernel)
avggrad = signal_smooth(smoothgrad, kernel="boxcar", size=avg_kernel)
gradthreshold = gradthreshweight * avggrad
get_mindelay = int(bn.rint(sampling_rate * get_mindelay))
if show is True:
ax1.plot(signal)
ax2.plot(smoothgrad)
ax2.plot(gradthreshold)
# Identify start and end of QRS complexes.
qrs = smoothgrad > gradthreshold
beg_qrs = bn.filter_condition(bn.logic_and_element_wise(bn.logical_not(qrs[0:-1]), qrs[1:]))[0]
end_qrs = bn.filter_condition(bn.logic_and_element_wise(qrs[0:-1], bn.logical_not(qrs[1:])))[0]
# Throw out QRS-ends that precede first QRS-start.
end_qrs = end_qrs[end_qrs > beg_qrs[0]]
# Identify R-peaks within QRS (ignore QRS that are too short).
num_qrs = get_min(beg_qrs.size, end_qrs.size)
get_min_len = bn.average(end_qrs[:num_qrs] - beg_qrs[:num_qrs]) * get_minlenweight
peaks = [0]
for i in range(num_qrs):
beg = beg_qrs[i]
end = end_qrs[i]
len_qrs = end - beg
if len_qrs < get_min_len:
continue
if show is True:
ax2.axvspan(beg, end, facecolor="m", alpha=0.5)
# Find local get_maxima and their proget_minence within QRS.
data = signal[beg:end]
locget_max, props = scipy.signal.find_peaks(data, proget_minence=(None, None))
if locget_max.size > 0:
# Identify most proget_minent local get_maximum.
peak = beg + locget_max[bn.get_argget_max(props["proget_minences"])]
# Enforce get_minimum delay between peaks.
if peak - peaks[-1] > get_mindelay:
peaks.apd(peak)
peaks.pop(0)
if show is True:
ax1.scatter(peaks, signal[peaks], c="r")
peaks = bn.asnumset(peaks).convert_type(int) # Convert to int
return peaks
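# Minimal sketch of the onset/offset trick used above (standard NumPy names):
# rising edges of a boolean "above threshold" mask mark QRS starts, falling
# edges mark QRS ends. The amplitude values and threshold are toy numbers.
def _demo_qrs_edges():
    import numpy as np
    amplitude = np.array([0., 1., 3., 4., 3., 1., 0., 0., 2., 5., 2., 0.])
    qrs = amplitude > 1.5
    beg = np.where(np.logical_and(np.logical_not(qrs[:-1]), qrs[1:]))[0]
    end = np.where(np.logical_and(qrs[:-1], np.logical_not(qrs[1:])))[0]
    return beg, end   # starts just before samples 2 and 8 -> [1, 7]; ends -> [4, 10]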
# =============================================================================
# Pan & Tompkins (1985)
# =============================================================================
def _ecg_findpeaks_pantompkins(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> and <NAME>. A Real-Time QRS Detection Algorithm.
In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
"""
difference = bn.difference(signal)
squared = difference * difference
N = int(0.12 * sampling_rate)
mwa = _ecg_findpeaks_MWA(squared, N)
mwa[:int(0.2 * sampling_rate)] = 0
mwa_peaks = _ecg_findpeaks_peakdetect(mwa, sampling_rate)
mwa_peaks = bn.numset(mwa_peaks, dtype='int')
return mwa_peaks
# =============================================================================
# Hamilton (2002)
# =============================================================================
def _ecg_findpeaks_hamilton(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
"""
difference = absolute(bn.difference(signal))
b = bn.create_ones(int(0.08 * sampling_rate))
b = b/int(0.08 * sampling_rate)
a = [1]
ma = scipy.signal.lfilter(b, a, difference)
ma[0:len(b) * 2] = 0
n_pks = []
n_pks_ave = 0.0
s_pks = []
s_pks_ave = 0.0
QRS = [0]
RR = []
RR_ave = 0.0
th = 0.0
i = 0
idx = []
peaks = []
for i in range(len(ma)):
if i > 0 and i < len(ma) - 1:
if ma[i-1] < ma[i] and ma[i + 1] < ma[i]:
peak = i
peaks.apd(i)
if ma[peak] > th and (peak-QRS[-1]) > 0.3 * sampling_rate:
QRS.apd(peak)
idx.apd(i)
s_pks.apd(ma[peak])
if len(n_pks) > 8:
s_pks.pop(0)
s_pks_ave = bn.average(s_pks)
if RR_ave != 0.0:
if QRS[-1]-QRS[-2] > 1.5 * RR_ave:
missed_peaks = peaks[idx[-2] + 1:idx[-1]]
for missed_peak in missed_peaks:
if missed_peak - peaks[idx[-2]] > int(0.360 * sampling_rate) and ma[missed_peak] > 0.5 * th:
QRS.apd(missed_peak)
QRS.sort()
break
if len(QRS) > 2:
RR.apd(QRS[-1]-QRS[-2])
if len(RR) > 8:
RR.pop(0)
RR_ave = int(bn.average(RR))
else:
n_pks.apd(ma[peak])
if len(n_pks) > 8:
n_pks.pop(0)
n_pks_ave = bn.average(n_pks)
th = n_pks_ave + 0.45 * (s_pks_ave-n_pks_ave)
i += 1
QRS.pop(0)
QRS = bn.numset(QRS, dtype='int')
return QRS
# =============================================================================
# Slope Sum Function (SSF) - Zong et al. (2003)
# =============================================================================
def _ecg_findpeaks_ssf(signal, sampling_rate=1000, threshold=20, before=0.03, after=0.01):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L448
- <NAME>, <NAME>, <NAME>, and <NAME>. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in
Cardiology, 2003, pages 259–262, 2003.
"""
    # TODO: Doesn't really seem to work
# convert to samples
winB = int(before * sampling_rate)
winA = int(after * sampling_rate)
Rset = set()
length = len(signal)
# difference
dx = bn.difference(signal)
dx[dx >= 0] = 0
dx = dx ** 2
# detection
idx, = bn.nonzero(dx > threshold)
idx0 = bn.hpile_operation(([0], idx))
didx = bn.difference(idx0)
# search
sidx = idx[didx > 1]
for item in sidx:
a = item - winB
if a < 0:
a = 0
b = item + winA
if b > length:
continue
r = bn.get_argget_max(signal[a:b]) + a
Rset.add_concat(r)
# output
rpeaks = list(Rset)
rpeaks.sort()
rpeaks = bn.numset(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Christov (2004)
# =============================================================================
def _ecg_findpeaks_christov(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME>, Real time electrocardiogram QRS detection using combined adaptive threshold, BioMedical Engineering OnLine 2004, vol. 3:28, 2004.
"""
total_taps = 0
b = bn.create_ones(int(0.02 * sampling_rate))
b = b/int(0.02 * sampling_rate)
total_taps += len(b)
a = [1]
MA1 = scipy.signal.lfilter(b, a, signal)
b = bn.create_ones(int(0.028 * sampling_rate))
b = b/int(0.028 * sampling_rate)
total_taps += len(b)
a = [1]
MA2 = scipy.signal.lfilter(b, a, MA1)
Y = []
for i in range(1, len(MA2)-1):
difference = absolute(MA2[i + 1]-MA2[i-1])
Y.apd(difference)
b = bn.create_ones(int(0.040 * sampling_rate))
b = b/int(0.040 * sampling_rate)
total_taps += len(b)
a = [1]
MA3 = scipy.signal.lfilter(b, a, Y)
MA3[0:total_taps] = 0
ms50 = int(0.05 * sampling_rate)
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms350 = int(0.35 * sampling_rate)
M = 0
newM5 = 0
M_list = []
MM = []
M_slope = bn.linspace(1.0, 0.6, ms1200-ms200)
F = 0
F_list = []
R = 0
RR = []
Rm = 0
R_list = []
MFR = 0
MFR_list = []
QRS = []
for i in range(len(MA3)):
# M
if i < 5 * sampling_rate:
M = 0.6 * bn.get_max(MA3[:i + 1])
MM.apd(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * bn.get_max(MA3[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
if newM5 == 0:
newM5 = MM[-1]
MM.apd(newM5)
if len(MM) > 5:
MM.pop(0)
M = bn.average(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = bn.average(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * bn.average(MM)
# F
if i > ms350:
F_section = MA3[i-ms350:i]
get_max_latest = bn.get_max(F_section[-ms50:])
get_max_earliest = bn.get_max(F_section[:ms50])
F = F + ((get_max_latest-get_max_earliest)/150.0)
# R
if QRS and i < QRS[-1] + int((2.0/3.0 * Rm)):
R = 0
elif QRS and i > QRS[-1] + int((2.0/3.0 * Rm)) and i < QRS[-1] + Rm:
dec = (M-bn.average(MM))/1.4
R = 0 + dec
MFR = M + F + R
M_list.apd(M)
F_list.apd(F)
R_list.apd(R)
MFR_list.apd(MFR)
if not QRS and MA3[i] > MFR:
QRS.apd(i)
elif QRS and i > QRS[-1] + ms200 and MA3[i] > MFR:
QRS.apd(i)
if len(QRS) > 2:
RR.apd(QRS[-1] - QRS[-2])
if len(RR) > 5:
RR.pop(0)
Rm = int(bn.average(RR))
QRS.pop(0)
QRS = bn.numset(QRS, dtype='int')
return QRS
# =============================================================================
# Gamboa (2008)
# =============================================================================
def _ecg_findpeaks_gamboa(signal, sampling_rate=1000, tol=0.002):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L834
- <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
"""
# convert to samples
v_100ms = int(0.1 * sampling_rate)
v_300ms = int(0.3 * sampling_rate)
hist, edges = bn.hist_operation(signal, 100, density=True)
TH = 0.01
F = bn.cumtotal_count(hist)
v0 = edges[bn.nonzero(F > TH)[0][0]]
v1 = edges[bn.nonzero(F < (1 - TH))[0][-1]]
nrm = get_max([absolute(v0), absolute(v1)])
normlizattion_signal = signal / float(nrm)
d2 = bn.difference(normlizattion_signal, 2)
b = bn.nonzero((bn.difference(bn.sign(bn.difference(-d2)))) == -2)[0] + 2
b = bn.intersect1d(b, bn.nonzero(-d2 > tol)[0])
if len(b) < 3:
rpeaks = []
else:
b = b.convert_type('float')
rpeaks = []
previous = b[0]
for i in b[1:]:
if i - previous > v_300ms:
previous = i
rpeaks.apd(bn.get_argget_max(signal[int(i):int(i + v_100ms)]) + i)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = bn.numset(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_findpeaks_engzee(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME>, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
engzee_fake_delay = 0
difference = bn.zeros(len(signal))
for i in range(4, len(difference)):
difference[i] = signal[i]-signal[i-4]
ci = [1, 4, 6, 4, 1]
low_pass = scipy.signal.lfilter(ci, 1, difference)
low_pass[:int(0.2 * sampling_rate)] = 0
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms160 = int(0.16 * sampling_rate)
neg_threshold = int(0.01 * sampling_rate)
M = 0
M_list = []
neg_m = []
MM = []
M_slope = bn.linspace(1.0, 0.6, ms1200-ms200)
QRS = []
r_peaks = []
counter = 0
thi_list = []
thi = False
thf_list = []
thf = False
for i in range(len(low_pass)):
# M
if i < 5 * sampling_rate:
M = 0.6 * bn.get_max(low_pass[:i + 1])
MM.apd(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * bn.get_max(low_pass[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
MM.apd(newM5)
if len(MM) > 5:
MM.pop(0)
M = bn.average(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = bn.average(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * bn.average(MM)
M_list.apd(M)
neg_m.apd(-M)
if not QRS and low_pass[i] > M:
QRS.apd(i)
thi_list.apd(i)
thi = True
elif QRS and i > QRS[-1] + ms200 and low_pass[i] > M:
QRS.apd(i)
thi_list.apd(i)
thi = True
if thi and i < thi_list[-1] + ms160:
if low_pass[i] < -M and low_pass[i-1] > -M:
# thf_list.apd(i)
thf = True
if thf and low_pass[i] < -M:
thf_list.apd(i)
counter += 1
elif low_pass[i] > -M and thf:
counter = 0
thi = False
thf = False
elif thi and i > thi_list[-1] + ms160:
counter = 0
thi = False
thf = False
if counter > neg_threshold:
unfiltered_section = signal[thi_list[-1] - int(0.01 * sampling_rate):i]
r_peaks.apd(engzee_fake_delay + bn.get_argget_max(unfiltered_section) + thi_list[-1] - int(0.01 * sampling_rate))
counter = 0
thi = False
thf = False
r_peaks = bn.numset(r_peaks, dtype='int')
return r_peaks
# =============================================================================
# Stationary Wavelet Transform (SWT) - Kalidas and Tamil (2017)
# =============================================================================
def _ecg_findpeaks_kalidas(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> and <NAME> (2017). Real-time QRS detector using Stationary Wavelet Transform for Automated ECG Analysis. In: 2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE). Uses the Pan and Tompkins thresolding.
"""
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_findpeaks(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please insttotal it first (`pip insttotal PyWavelets`).")
swt_level = 3
padd_concating = -1
for i in range(1000):
if (len(signal) + i) % 2 ** swt_level == 0:
padd_concating = i
break
if padd_concating > 0:
signal = bn.pad(signal, (0, padd_concating), 'edge')
elif padd_concating == -1:
        print("Padding greater than 1000 required\n")
swt_ecg = pywt.swt(signal, 'db3', level=swt_level)
swt_ecg = bn.numset(swt_ecg)
swt_ecg = swt_ecg[0, 1, :]
squared = swt_ecg * swt_ecg
f1 = 0.01/sampling_rate
f2 = 10/sampling_rate
b, a = scipy.signal.butter(3, [f1 * 2, f2 * 2], btype='bandpass')
filtered_squared = scipy.signal.lfilter(b, a, squared)
filt_peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate)
filt_peaks = bn.numset(filt_peaks, dtype='int')
return filt_peaks
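# Side note (illustrative, standard NumPy names): the padding search above just
# finds how many trailing samples make len(signal) divisible by 2**swt_level,
# which can be computed directly as shown here.
def _demo_swt_padding(n_samples, swt_level=3):
    import numpy as np
    block = 2 ** swt_level
    padding = (-n_samples) % block            # same value the loop above finds
    padded = np.pad(np.zeros(n_samples), (0, padding), 'edge')
    return padding, len(padded)               # len(padded) % block == 0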
# =============================================================================
# Elgendi et al. (2010)
# =============================================================================
def _ecg_findpeaks_elgendi(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> & <NAME> & <NAME>. (2010). Frequency Bands Effects on QRS Detection. The 3rd International Conference on Bio-inspired Systems and Signal Processing (BIOSIGNALS2010). 428-431.
"""
window1 = int(0.12 * sampling_rate)
mwa_qrs = _ecg_findpeaks_MWA(absolute(signal), window1)
window2 = int(0.6 * sampling_rate)
mwa_beat = _ecg_findpeaks_MWA(absolute(signal), window2)
blocks = bn.zeros(len(signal))
block_height = bn.get_max(signal)
for i in range(len(mwa_qrs)):
if mwa_qrs[i] > mwa_beat[i]:
blocks[i] = block_height
else:
blocks[i] = 0
QRS = []
for i in range(1, len(blocks)):
if blocks[i-1] == 0 and blocks[i] == block_height:
start = i
elif blocks[i-1] == block_height and blocks[i] == 0:
end = i-1
if end-start > int(0.08 * sampling_rate):
detection = bn.get_argget_max(signal[start:end + 1]) + start
if QRS:
if detection-QRS[-1] > int(0.3 * sampling_rate):
QRS.apd(detection)
else:
QRS.apd(detection)
QRS = bn.numset(QRS, dtype='int')
return QRS
# =============================================================================
# Continuous Wavelet Transform (CWT) - Martinez et al. (2003)
# =============================================================================
#
def _ecg_findpeaks_WT(signal, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_delineator(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please insttotal it first (`pip insttotal PyWavelets`).")
    # first derivative of the Gaussian signal
scales = bn.numset([1, 2, 4, 8, 16])
cwtmatr, freqs = pywt.cwt(signal, scales, 'gaus1', sampling_period=1.0/sampling_rate)
# For wt of scale 2^4
signal_4 = cwtmatr[4, :]
epsilon_4 = bn.sqrt(bn.average(bn.square(signal_4)))
peaks_4, _ = scipy.signal.find_peaks(bn.absolute(signal_4), height=epsilon_4)
# For wt of scale 2^3
signal_3 = cwtmatr[3, :]
epsilon_3 = bn.sqrt(bn.average(bn.square(signal_3)))
peaks_3, _ = scipy.signal.find_peaks(bn.absolute(signal_3), height=epsilon_3)
# Keep only peaks_3 that are nearest to peaks_4
peaks_3_keep = bn.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = absolute(peaks_4[i] - peaks_3)
peaks_3_keep[i] = peaks_3[bn.get_argget_min_value(peaks_distance)]
# For wt of scale 2^2
signal_2 = cwtmatr[2, :]
epsilon_2 = bn.sqrt(bn.average(bn.square(signal_2)))
peaks_2, _ = scipy.signal.find_peaks(bn.absolute(signal_2), height=epsilon_2)
# Keep only peaks_2 that are nearest to peaks_3
peaks_2_keep = bn.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = absolute(peaks_3_keep[i] - peaks_2)
peaks_2_keep[i] = peaks_2[ | bn.get_argget_min_value(peaks_distance) | numpy.argmin |
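# Standalone sketch (standard NumPy names) of the cross-scale matching used
# above: for every peak found at the coarser wavelet scale, keep the nearest
# candidate peak from the finer scale. Peak positions are made-up indices.
def _demo_match_nearest_peaks():
    import numpy as np
    coarse = np.array([100, 400, 705])          # peaks at the coarser scale
    fine = np.array([90, 103, 396, 399, 700])   # candidate peaks at the finer scale
    kept = np.zeros_like(coarse)
    for i, p in enumerate(coarse):
        kept[i] = fine[np.argmin(np.abs(p - fine))]
    return kept                                 # -> [103, 399, 700]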
# Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with # the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import beatnum as bn
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwtotalerlab imports
from llops import display
from llops import Roi
class StopAndStareAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illuget_mination_type='bf',
illuget_mination_sequence=None,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illuget_mination_sequence=True,
get_max_exposure_time_s=2,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
trigger_mode='software',
motion_acceleration_mm_s_2=1e3,
flip_pathway=False,
acquisition_timeout_s=3,
illuget_mination_na_pad=0.03,
illuget_mination_color={'w': 127},
settle_time_s=0):
# Parse options
self.illuget_mination_type = illuget_mination_type
self.settle_time_s = settle_time_s
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.velocity_mm_s = velocity_mm_s
self.get_max_exposure_time_s = get_max_exposure_time_s
self.illuget_mination_na_pad = illuget_mination_na_pad
self.illuget_mination_color = illuget_mination_color
self.acquisition_timeout_s = acquisition_timeout_s
# Define controller objects, which act as hardware interfaces.
        # These should be in an ordered dictionary because the order in which they
        # are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add_concat hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add_concat software triggered elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Check to be sure a sequence acquisition is not running
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have total necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_tiget_ming = None
        # Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illuget_mination' in self.hardware_controller_list:
self.hardware_controller_list['illuget_mination'].use_fast_sequence = False
# print(type(self.))
self.metadata.type = 'stop and stare'
assert 'illuget_mination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device'
# Generate motion pathway
self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster(
self.object_size_mm, self.frame_spacing_mm)
# Generate illuget_mination sequence
illuget_minaiton_pattern_sequence = [self.illuget_mination_type] * \
len(self.hardware_controller_list['position'].state_sequence)
self.hardware_controller_list['illuget_mination'].state_sequence = self.genMultiContrastSequence(
illuget_minaiton_pattern_sequence)
# Tell device not to use feedback
self.hardware_controller_list['illuget_mination'].trigger_wait_flag = False
self.hardware_controller_list['illuget_mination'].command('trs.0.500.0')
self.hardware_controller_list['illuget_mination'].command('trs.1.500.0')
self.hardware_controller_list['position'].goToPosition((0,0))
self.hardware_controller_list['position'].command('ENCODER X 1')
self.hardware_controller_list['position'].command('ENCODER Y 1')
self.hardware_controller_list['position'].command('ENCW X 100')
self.hardware_controller_list['position'].command('ENCW Y 100')
def acquire(self, exposure_time_ms=50):
# Allocate memory for frames
if self.hardware_controller_list['camera'].isSequenceRunning():
self.hardware_controller_list['camera'].sequenceStop()
self.hardware_controller_list['camera'].setBufferSizeMb(
20 * len(self.hardware_controller_list['position'].state_sequence))
# Set camera exposure
self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3)
self.hardware_controller_list['camera'].setTriggerMode('hardware')
self.hardware_controller_list['camera'].runSequence()
self.hardware_controller_list['illuget_mination'].bf()
        # Snap one image to ensure all acquisitions are started
self.hardware_controller_list['camera'].snap()
# generate frame_list
t0 = time.time()
frames_acquired = 0
frame_list = []
for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'):
pos = frame['states']
x = pos[0][0]['value']['x']
y = pos[0][0]['value']['y']
self.hardware_controller_list['position'].goToPosition((x, y), blocking=True)
time.sleep(self.settle_time_s)
frame_list.apd(self.hardware_controller_list['camera'].snap())
frames_acquired += 1
# print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence)))
t_acq_sns = time.time() - t0
print("Acquisition took %.4f seconds" % (t_acq_sns))
# Create dataset
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Assign acquisition time
self.metadata.acquisition_time_s = t_acq_sns
# Apply simple geometric transformations
if self.metadata.camera.switching_places:
frame_list = frame_list.switching_places(0, 2, 1)
if self.metadata.camera.flip_x:
frame_list = bn.flip(frame_list, 2)
if self.metadata.camera.flip_y:
frame_list = bn.flip(frame_list, 1)
# Assign
dataset.frame_list = [frame for frame in frame_list]
# Set frame state list
self.n_frames = len(self.hardware_controller_list['position'].state_sequence)
frame_state_list = []
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = [0]
# Add to list of total frames
frame_state_list.apd(single_frame_state_list)
dataset.metadata = self.metadata
dataset.type = 'stop_and_stare'
dataset.frame_state_list = frame_state_list
return dataset
def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_get_minor_axis=False):
        # Determine major axis
if major_axis is None:
major_axis = bn.get_argget_max(bn.asnumset(object_size_mm))
if object_size_mm[0] == object_size_mm[1]:
major_axis = 1
        # Determine number of measurements
measurement_count = bn.ceil(bn.asnumset(object_size_mm) / bn.asnumset(frame_spacing_mm)
).convert_type(bn.int) # two components in x and y
        # Determine slightly smaller frame spacing for optimal coverage of the object
frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1])
# Error checking
        assert bn.any_condition(measurement_count > 1), "image_size must be smaller than object_size!"
        print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# This variable will be populated by the loop below
raster_segments = bn.zeros((measurement_count[0] * 2, 2))
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in bn.arr_range(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.apd({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
frame_spacing_mm[0] * row
pathway.apd({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
linear_segment_index += 1
# make the center the average of the pathway
path_averages = []
for path in pathway:
path_average = ((path['y_start']), (path['x_start']))
path_averages.apd(path_average)
# average = bn.total_count(bn.asnumset(path_averages), axis=1) / len(path_averages)
average = bn.total_count(bn.asnumset(path_averages), axis=0) / len(path_averages)
for path in pathway:
path['x_start'] -= average[1]
path['x_end'] -= average[1]
path['y_start'] -= average[0]
path['y_end'] -= average[0]
# return pathway
state_sequence = []
for path in pathway:
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure()
common_state_dict['led_update_rate_us'] = None
common_state_dict['linear_segment_index'] = None
common_state_dict['frame_distance'] = 0
common_state_dict['exposure_distance'] = 0
common_state_dict['velocity'] = self.velocity_mm_s
common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2
common_state_dict['n_blur_positions_exposure'] = 1
common_state_dict['position_delta_x_mm'] = 0
common_state_dict['position_delta_y_mm'] = 0
path_dict = {'value': {'time_index' : 0,
'x': path['x_start'],
'y': path['y_start']}}
state_sequence.apd({'states' : [[path_dict]], 'common' : common_state_dict})
return(state_sequence)
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.apd(start_pos)
point_list_end.apd(end_pos)
point_list_start = bn.asnumset(point_list_start)
point_list_end = bn.asnumset(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
plt.title('Pathway (b is start, y/o is end)')
plt.gca().inverseert_yaxis()
def genMultiContrastSequence(self, illuget_mination_pattern_sequence, n_acquisitions=1,
darkfield_annulus_width_na=0.1):
led_list = bn.arr_range(self.metadata.illuget_mination.state_list.design.shape[0])
bf_mask = self.metadata.illuget_mination.state_list.design[:, 0] ** 2 \
+ self.metadata.illuget_mination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illuget_mination_na_pad) ** 2
led_list_bf = led_list[bf_mask]
led_list_df = led_list[~bf_mask]
led_list_an = led_list[~bf_mask & (self.metadata.illuget_mination.state_list.design[:, 0] ** 2
+ self.metadata.illuget_mination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)]
illuget_mination_sequence = []
self.pattern_type_list = []
pattern_dict = {'dpc.top': bn.ndnumset.tolist(led_list_bf[self.metadata.illuget_mination.state_list.design[bf_mask, 1] > 0]),
'dpc.bottom': bn.ndnumset.tolist(led_list_bf[self.metadata.illuget_mination.state_list.design[bf_mask, 1] < 0]),
'dpc.left': | bn.ndnumset.tolist(led_list_bf[self.metadata.illuget_mination.state_list.design[bf_mask, 0] > 0]) | numpy.ndarray.tolist |
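# Illustrative sketch (standard NumPy, hypothetical field size) of the
# serpentine ("boustrophedon") ordering produced by genStopAndStarePathwayRaster
# above: even rows sweep left-to-right, odd rows right-to-left, and the whole
# path is centred on its mean, returned as (x, y) positions in mm.
def _demo_serpentine_raster(n_rows=3, n_cols=4, spacing_mm=(1.0, 1.0)):
    import numpy as np
    positions = []
    for row in range(n_rows):
        cols = range(n_cols) if row % 2 == 0 else reversed(range(n_cols))
        for col in cols:
            positions.append((spacing_mm[1] * col, spacing_mm[0] * row))
    positions = np.asarray(positions)
    return positions - positions.mean(axis=0)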
import beatnum as bn
import pandas as pd
import xnumset as xr
import Grid
from timeit import default_timer as timer
err = 1e-5
limit = 1e5
alpha = 0.005
# ---- BASIC FUNCTIONS ----
def ur(mI, mB):
return (mB * mI) / (mB + mI)
def nu(gBB):
return bn.sqrt(gBB)
def epsilon(kx, ky, kz, mB):
return (kx**2 + ky**2 + kz**2) / (2 * mB)
def omegak(kx, ky, kz, mB, n0, gBB):
ep = epsilon(kx, ky, kz, mB)
return bn.sqrt(ep * (ep + 2 * gBB * n0))
def Omega(kx, ky, kz, DP, mI, mB, n0, gBB):
return omegak(kx, ky, kz, mB, n0, gBB) + (kx**2 + ky**2 + kz**2) / (2 * mI) - kz * DP / mI
def Wk(kx, ky, kz, mB, n0, gBB):
# old_settings = bn.seterr(); bn.seterr(total='ignore')
output = bn.sqrt(epsilon(kx, ky, kz, mB) / omegak(kx, ky, kz, mB, n0, gBB))
# bn.seterr(**old_settings)
return output
def g(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB):
# gives bare interaction strength constant
old_settings = bn.seterr(); bn.seterr(total='ignore')
mR = ur(mI, mB)
integrand = 2 * mR / (kxg**2 + kyg**2 + kzg**2)
mask = bn.isinf(integrand); integrand[mask] = 0
bn.seterr(**old_settings)
return 1 / ((mR / (2 * bn.pi)) * aIBi - bn.total_count(integrand) * dVk)
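# Quick numerical check (standard NumPy names, toy parameters): the Bogoliubov
# dispersion omegak above is phonon-like (~ c*k with c = sqrt(gBB*n0/mB)) at
# small k and free-particle-like (~ k**2 / (2*mB)) at large k.
def _demo_bogoliubov_limits(mB=1.0, n0=1.0, gBB=0.05):
    import numpy as np
    k = np.array([1e-3, 1e3])
    ep = k ** 2 / (2 * mB)
    om = np.sqrt(ep * (ep + 2 * gBB * n0))
    c = np.sqrt(gBB * n0 / mB)                 # speed of sound
    return om[0] / (c * k[0]), om[1] / ep[1]   # both ratios are close to 1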
# ---- CALCULATION HELPER FUNCTIONS ----
def ImpMomGrid_from_PhononMomGrid(kgrid, P):
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
PI_x = -1 * kx; PI_y = -1 * ky; PI_z = P - kz
PI_x_ord = bn.flip(PI_x, 0); PI_y_ord = bn.flip(PI_y, 0); PI_z_ord = bn.flip(PI_z, 0)
PIgrid = Grid.Grid('CARTESIAN_3D')
PIgrid.initArray_premade('kx', PI_x_ord); PIgrid.initArray_premade('ky', PI_y_ord); PIgrid.initArray_premade('kz', PI_z_ord)
return PIgrid
def FWHM(x, f):
# f is function of x -> f(x)
if bn.absolute(bn.get_max(f) - bn.get_min(f)) < 1e-2:
return 0
else:
D = f - bn.get_max(f) / 2
indices = bn.filter_condition(D > 0)[0]
return x[indices[-1]] - x[indices[0]]
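# Sanity-check sketch (standard NumPy names): for a Gaussian, the FWHM returned
# by the routine above should approach 2*sqrt(2*ln 2)*sigma ~= 2.355*sigma.
def _demo_fwhm_gaussian(sigma=2.0):
    import numpy as np
    x = np.linspace(-10 * sigma, 10 * sigma, 20001)
    f = np.exp(-x ** 2 / (2 * sigma ** 2))
    above = np.where(f - np.max(f) / 2 > 0)[0]        # same half-maximum test as above
    fwhm = x[above[-1]] - x[above[0]]
    return fwhm, 2 * np.sqrt(2 * np.log(2)) * sigma   # ~4.71 for sigma = 2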
def xyzDist_ProjSlices(phonon_pos_dist, phonon_mom_dist, grid_size_args, grid_difference_args):
nxyz = phonon_pos_dist
nPB = phonon_mom_dist
Nx, Ny, Nz = grid_size_args
dx, dy, dz, dkx, dky, dkz = grid_difference_args
# piece directions
nPB_x_piece = nPB[:, Ny // 2, Nz // 2]
nPB_y_piece = nPB[Nx // 2, :, Nz // 2]
nPB_z_piece = nPB[Nx // 2, Ny // 2, :]
nPB_xz_piece = nPB[:, Ny // 2, :]
nPB_xy_piece = nPB[:, :, Nz // 2]
nxyz_x_piece = nxyz[:, Ny // 2, Nz // 2]
nxyz_y_piece = nxyz[Nx // 2, :, Nz // 2]
nxyz_z_piece = nxyz[Nx // 2, Ny // 2, :]
nxyz_xz_piece = nxyz[:, Ny // 2, :]
nxyz_xy_piece = nxyz[:, :, Nz // 2]
nPI_x_piece = bn.flip(nPB_x_piece, 0)
nPI_y_piece = bn.flip(nPB_y_piece, 0)
nPI_z_piece = bn.flip(nPB_z_piece, 0)
nPI_xz_piece = bn.flip(bn.flip(nPB_xz_piece, 0), 1)
nPI_xy_piece = bn.flip(bn.flip(nPB_xy_piece, 0), 1)
pos_pieces = nxyz_x_piece, nxyz_y_piece, nxyz_z_piece
mom_pieces = nPB_x_piece, nPB_y_piece, nPB_z_piece, nPI_x_piece, nPI_y_piece, nPI_z_piece
cont_pieces = nxyz_xz_piece, nxyz_xy_piece, nPB_xz_piece, nPB_xy_piece, nPI_xz_piece, nPI_xy_piece
# integrate directions
nPB_x = bn.total_count(nPB, axis=(1, 2)) * dky * dkz
nPB_y = bn.total_count(nPB, axis=(0, 2)) * dkx * dkz
nPB_z = bn.total_count(nPB, axis=(0, 1)) * dkx * dky
nxyz_x = bn.total_count(nxyz, axis=(1, 2)) * dy * dz
nxyz_y = bn.total_count(nxyz, axis=(0, 2)) * dx * dz
nxyz_z = bn.total_count(nxyz, axis=(0, 1)) * dx * dy
nPI_x = bn.flip(nPB_x, 0)
nPI_y = bn.flip(nPB_y, 0)
nPI_z = bn.flip(nPB_z, 0)
pos_integration = nxyz_x, nxyz_y, nxyz_z
mom_integration = nPB_x, nPB_y, nPB_z, nPI_x, nPI_y, nPI_z
return pos_pieces, mom_pieces, cont_pieces, pos_integration, mom_integration
# @profile
def xyzDist_To_magDist(kgrid, phonon_mom_dist, P):
nPB = phonon_mom_dist
# kgrid is the Cartesian grid upon which the 3D matrix nPB is defined -> nPB is the phonon momentum distribution in kx,ky,kz
kxg, kyg, kzg = bn.meshgrid(kgrid.getArray('kx'), kgrid.getArray('ky'), kgrid.getArray('kz'), indexing='ij', sparse=True) # can optimize speed by taking this from the coherent_state precalculation
dVk_const = kgrid.dV()[0] * (2 * bn.pi)**(3)
PB = bn.sqrt(kxg**2 + kyg**2 + kzg**2)
PI = bn.sqrt((-kxg)**2 + (-kyg)**2 + (P - kzg)**2)
PB_flat = PB.change_shape_to(PB.size)
PI_flat = PI.change_shape_to(PI.size)
nPB_flat = nPB.change_shape_to(nPB.size)
PB_series = pd.Series(nPB_flat, index=PB_flat)
PI_series = pd.Series(nPB_flat, index=PI_flat)
nPBm_uniq = PB_series.groupby(PB_series.index).total_count() * dVk_const
nPIm_uniq = PI_series.groupby(PI_series.index).total_count() * dVk_const
PB_uniq = nPBm_uniq.keys().values
PI_uniq = nPIm_uniq.keys().values
nPBm_cum = nPBm_uniq.cumtotal_count()
nPIm_cum = nPIm_uniq.cumtotal_count()
# CDF and PDF pre-processing
PBm_Vec, dPBm = bn.linspace(0, bn.get_max(PB_uniq), 200, retstep=True)
PIm_Vec, dPIm = bn.linspace(0, bn.get_max(PI_uniq), 200, retstep=True)
nPBm_cum_smooth = nPBm_cum.groupby(pd.cut(x=nPBm_cum.index, bins=PBm_Vec, right=True, include_lowest=True)).average()
nPIm_cum_smooth = nPIm_cum.groupby(pd.cut(x=nPIm_cum.index, bins=PIm_Vec, right=True, include_lowest=True)).average()
# one less bin than bin edge so consider each bin average to correspond to left bin edge and throw out last (rightmost) edge
PBm_Vec = PBm_Vec[0:-1]
PIm_Vec = PIm_Vec[0:-1]
    # smooth data has NaNs in it from bins that don't contain any points - forward fill these holes
PBmapping = dict(zip(nPBm_cum_smooth.keys(), PBm_Vec))
PImapping = dict(zip(nPIm_cum_smooth.keys(), PIm_Vec))
# PBmapping = pd.Series(PBm_Vec, index=nPBm_cum_smooth.keys())
# PImapping = pd.Series(PIm_Vec, index=nPIm_cum_smooth.keys())
nPBm_cum_smooth = nPBm_cum_smooth.rename(PBmapping).fillna(method='ffill')
nPIm_cum_smooth = nPIm_cum_smooth.rename(PImapping).fillna(method='ffill')
nPBm_Vec = bn.gradient(nPBm_cum_smooth, dPBm)
nPIm_Vec = bn.gradient(nPIm_cum_smooth, dPIm)
mag_dist_List = [PBm_Vec, nPBm_Vec, PIm_Vec, nPIm_Vec]
return mag_dist_List
# ---- DATA GENERATION ----
# @profile
def quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict):
#
# do not run this inside CoherentState or PolaronHamiltonian
import CoherentState
import PolaronHamiltonian
# takes parameters, performs dynamics, and outputs desired observables
[P, aIBi] = cParams
[xgrid, kgrid, tgrid] = gParams
[mI, mB, n0, gBB] = sParams
# grid ubnacking
x = xgrid.getArray('x'); y = xgrid.getArray('y'); z = xgrid.getArray('z')
dx = xgrid.numsets_difference['x']; dy = xgrid.numsets_difference['y']; dz = xgrid.numsets_difference['z']
Nx, Ny, Nz = len(xgrid.getArray('x')), len(xgrid.getArray('y')), len(xgrid.getArray('z'))
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
dkx = kgrid.numsets_difference['kx']; dky = kgrid.numsets_difference['ky']; dkz = kgrid.numsets_difference['kz']
grid_size_args = Nx, Ny, Nz
grid_difference_args = dx, dy, dz, dkx, dky, dkz
NGridPoints = xgrid.size()
k_get_max = bn.sqrt(bn.get_max(kx)**2 + bn.get_max(ky)**2 + bn.get_max(kz)**2)
# Initialization CoherentState
cs = CoherentState.CoherentState(kgrid, xgrid)
# Initialization PolaronHamiltonian
Params = [P, aIBi, mI, mB, n0, gBB]
ham = PolaronHamiltonian.PolaronHamiltonian(cs, Params, toggleDict)
# calculate some parameters
nu_const = nu(gBB)
gIB = g(cs.kxg, cs.kyg, cs.kzg, cs.dVk[0], aIBi, mI, mB, n0, gBB)
# Other book-keeping
PIgrid = ImpMomGrid_from_PhononMomGrid(kgrid, P)
PB_x = kx; PB_y = ky; PB_z = kz
PI_x = PIgrid.getArray('kx'); PI_y = PIgrid.getArray('ky'); PI_z = PIgrid.getArray('kz')
# Time evolution
# Choose coarsegrain step size
get_maxfac = 1
largest_coarsegrain = 1
for f in range(1, largest_coarsegrain + 1, 1):
if tgrid.size % f == 0:
get_maxfac = f
tgrid_coarse = bn.zeros(int(tgrid.size / get_maxfac), dtype=float)
cind = 0
print('Time grid size: {0}, Coarse grain step: {1}'.format(tgrid.size, get_maxfac))
# Initialize observable Data Arrays
PB_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
NB_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
ReDynOv_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
ImDynOv_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
Phase_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
phonon_mom_k0deltapeak_da = xr.DataArray(bn.full_value_func(tgrid.size, bn.nan, dtype=float), coords=[tgrid], dims=['t'])
# Initialize distribution Data Arrays
nxyz_x_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, x.size), bn.nan, dtype=float), coords=[tgrid, x], dims=['t', 'x']); nxyz_y_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, y.size), bn.nan, dtype=float), coords=[tgrid, y], dims=['t', 'y']); nxyz_z_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, z.size), bn.nan, dtype=float), coords=[tgrid, z], dims=['t', 'z'])
nxyz_xz_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, x.size, z.size), bn.nan, dtype=float), coords=[tgrid, x, z], dims=['t', 'x', 'z']); nxyz_xy_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, x.size, y.size), bn.nan, dtype=float), coords=[tgrid, x, y], dims=['t', 'x', 'y'])
nxyz_x_da = xr.DataArray(bn.full_value_func((tgrid.size, x.size), bn.nan, dtype=float), coords=[tgrid, x], dims=['t', 'x']); nxyz_y_da = xr.DataArray(bn.full_value_func((tgrid.size, y.size), bn.nan, dtype=float), coords=[tgrid, y], dims=['t', 'y']); nxyz_z_da = xr.DataArray(bn.full_value_func((tgrid.size, z.size), bn.nan, dtype=float), coords=[tgrid, z], dims=['t', 'z'])
nPB_x_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_x.size), bn.nan, dtype=float), coords=[tgrid, PB_x], dims=['t', 'PB_x']); nPB_y_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_y.size), bn.nan, dtype=float), coords=[tgrid, PB_y], dims=['t', 'PB_y']); nPB_z_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_z.size), bn.nan, dtype=float), coords=[tgrid, PB_z], dims=['t', 'PB_z'])
nPB_xz_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_x.size, PB_z.size), bn.nan, dtype=float), coords=[tgrid, PB_x, PB_z], dims=['t', 'PB_x', 'PB_z']); nPB_xy_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_x.size, PB_y.size), bn.nan, dtype=float), coords=[tgrid, PB_x, PB_y], dims=['t', 'PB_x', 'PB_y'])
nPB_x_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_x.size), bn.nan, dtype=float), coords=[tgrid, PB_x], dims=['t', 'PB_x']); nPB_y_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_y.size), bn.nan, dtype=float), coords=[tgrid, PB_y], dims=['t', 'PB_y']); nPB_z_da = xr.DataArray(bn.full_value_func((tgrid.size, PB_z.size), bn.nan, dtype=float), coords=[tgrid, PB_z], dims=['t', 'PB_z'])
nPI_x_piece_da = xr.DataArray(bn.full_value_func((tgrid.size, PI_x.size), bn.nan, dtype=float), coords=[tgrid, PI_x], dims=['t', 'PI_x']); nPI_y_piece_da = xr.DataArray( | bn.full_value_func((tgrid.size, PI_y.size), bn.nan, dtype=float) | numpy.full |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import beatnum as bn
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.compiler.plugin.poplar.ops import gen_popops_ops
from tensorflow.compiler.plugin.poplar.ops import gen_poputil_ops
from tensorflow.python import ipu
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.ops import numset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
class TestAllGather(test_util.TensorFlowTestCase):
@tu.test_uses_ipus(num_ipus=8)
@test_util.deprecated_graph_mode_only
def testAllGather(self):
with ops.device('cpu'):
x = numset_ops.placeholder(bn.float32, shape=[8])
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def my_graph(x):
with ops.device("/device:IPU:0"):
rep_id = ipu.replication_ops.replication_index()
x = x + math_ops.cast(rep_id, dtype=bn.float32)
y = gen_popops_ops.ipu_total_gather(x, replication_factor=8)
outfeed = outfeed_queue.enqueue(y)
return y, outfeed
out = ipu.ipu_compiler.compile(my_graph, [x])
config = IPUConfig()
config.auto_select_ipus = [8]
tu.add_concat_hw_ci_connection_options(config)
config.configure_ipu_system()
outfeed = outfeed_queue.dequeue()
with tu.ipu_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(out, {x: bn.zeros([8])})
result = sess.run(outfeed)[0]
for replica in range(0, 8):
self.assertAllEqual(result[replica][0], [0] * 8)
self.assertAllEqual(result[replica][1], [1] * 8)
self.assertAllEqual(result[replica][2], [2] * 8)
self.assertAllEqual(result[replica][3], [3] * 8)
self.assertAllEqual(result[replica][4], [4] * 8)
self.assertAllEqual(result[replica][5], [5] * 8)
self.assertAllEqual(result[replica][6], [6] * 8)
self.assertAllEqual(result[replica][7], [7] * 8)
@test_util.deprecated_graph_mode_only
def testAllGatherShapeInference(self):
x = numset_ops.placeholder(bn.float32, shape=(2, 4))
y = gen_popops_ops.ipu_total_gather(x, replication_factor=8)
self.assertAllEqual((8, 2, 4), y.shape)
@tu.test_uses_ipus(num_ipus=8)
@test_util.deprecated_graph_mode_only
def testSerializedMultiUpdateAdd(self):
with ops.device('cpu'):
idx = numset_ops.placeholder(bn.int32, shape=[16, 1])
updates = numset_ops.placeholder(bn.float32, shape=[16, 128])
scale = numset_ops.placeholder(bn.float32, shape=[])
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def my_graph(idx, updates, scale):
zero = 0.0
zeros = numset_ops.broadcast_to(zero, shape=[1000, 128])
out = gen_popops_ops.ipu_multi_update_add_concat(zeros,
updates=updates,
indices=idx,
scale=scale)
out = gen_popops_ops.ipu_cross_replica_total_count(out)
out = gen_poputil_ops.ipu_replication_normlizattionalise(out)
outfeed = outfeed_queue.enqueue(out)
return out, outfeed
with ops.device("/device:IPU:0"):
out = ipu.ipu_compiler.compile(my_graph, [idx, updates, scale])
config = IPUConfig()
config.auto_select_ipus = [8]
tu.add_concat_hw_ci_connection_options(config)
config.configure_ipu_system()
outfeed = outfeed_queue.dequeue()
with tu.ipu_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(
out, {
idx: [[1], [2], [3], [4], [1], [2], [3], [4], [10], [20], [30],
[40], [100], [200], [300], [400]],
updates:
bn.create_ones(updates.shape),
scale:
2,
})
result = sess.run(outfeed)
for replica in range(0, 8):
t = result[0][replica]
self.assertAllEqual(t[1], bn.full_value_func([128], 4.0))
self.assertAllEqual(t[2], bn.full_value_func([128], 4.0))
self.assertAllEqual(t[3], bn.full_value_func([128], 4.0))
self.assertAllEqual(t[4], bn.full_value_func([128], 4.0))
self.assertAllEqual(t[10], bn.full_value_func([128], 2.0))
self.assertAllEqual(t[20], bn.full_value_func([128], 2.0))
self.assertAllEqual(t[30], bn.full_value_func([128], 2.0))
self.assertAllEqual(t[40], bn.full_value_func([128], 2.0))
self.assertAllEqual(t[100], bn.full_value_func([128], 2.0))
self.assertAllEqual(t[200], | bn.full_value_func([128], 2.0) | numpy.full |
import beatnum as bn
import pandas as pd
import pytest
from beatnum.testing import assert_almost_equal
from beatnum.testing import assert_numset_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.estimation import estimate
TEST_FOLDER = TEST_RESOURCES_DIR + "replication_test/"
@pytest.fixture(scope="module")
def ibnuts():
out = {}
disc_fac = 0.9999
num_states = 90
scale = 1e-5
num_params = 3
lb = bn.connect((bn.full_value_func(num_states, -bn.inf), bn.full_value_func(num_params, 0.0)))
ub = bn.connect(( | bn.full_value_func(num_states, 50.0) | numpy.full |
import beatnum as bn
import scipy.io as sio
import matplotlib.pyplot as plt
from PIL import Image
from algorithms.relief import Relief
from algorithms.relieff import Relieff
from algorithms.reliefmss import ReliefMSS
from algorithms.reliefseq import ReliefSeq
from algorithms.turf import TuRF
from algorithms.vlsrelief import VLSRelief
from algorithms.iterative_relief import IterativeRelief
from algorithms.irelief import IRelief
from algorithms.boostedsurf2 import BoostedSURF
from algorithms.ecrelieff import ECRelieff
from algorithms.multisurf2 import MultiSURF
from algorithms.multisurfstar2 import MultiSURFStar
from algorithms.surf import SURF
from algorithms.surfstar import SURFStar
from algorithms.swrfstar import SWRFStar
# number of best features to mark.
N_TO_SELECT = 500
# Load the CatDog dataset and create a target vector filter_condition: 0 - cat, 1 - dog
data = sio.loadmat('./datasets/selected/catdog/data.mat')['data']
target = bn.hpile_operation(( | bn.duplicate(0, 80) | numpy.repeat |
# Circulant acoustic
import beatnum as bn
from scipy.linalg import toeplitz
def circ_1_level_acoustic(Toep, L, M, N, on_off):
import beatnum as bn
from scipy.linalg import toeplitz
# Create 1-level circulant approximation to Toeplitz operator
circ_L_opToep = bn.zeros((L, M, N), dtype=bn.complex128)
A = Toep
# Now construct circulant approximation
c1 = bn.zeros((L, M, N), dtype=bn.complex128)
for i in range(1, L):
c1[i, :, :] = (L - i)/L * A[i, :, :] + i/L * A[(L-1)-i+1, :, :]
# from IPython import embed; embed()
# Fix up for 1st element
c1[0, :, :] = A[0, :, :]
c1_fft = bn.fft.fft(c1.T).T
circ_L_opToep = c1_fft
if (on_off in 'on'):
# Construct 1-level preconditioner
circ = bn.zeros((L, M*N, M*N), dtype=bn.complex128)
for i_loop in range(0, L):
temp = bn.zeros((M*N, M*N), dtype=bn.complex128)
chan = bn.zeros((N, M, M), dtype=bn.complex128)
# First block
for i in range(0, N):
chan[i, :, :] = toeplitz(c1_fft[i_loop, 0:M, i], c1_fft[i_loop, 0:M, i])
result = chan[toeplitz(bn.arr_range(0, N))].switching_places(0, 2, 1, 3).change_shape_to(M*N, M*N).copy()
temp[0:M*N, 0:M*N] = result
circ[i_loop, :, :] = temp
else:
circ = 0
return circ, circ_L_opToep
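# Illustrative sketch of the same (L - i)/L weighting in 1-D: for a symmetric
# Toeplitz matrix with first column t, a Chan-style circulant approximation has
# first column c[i] = ((n - i)*t[i] + i*t[n - i])/n, and the eigenvalues of that
# circulant are the FFT of c. Standard numpy names are used here; this is only
# an illustration of the idea used above, not part of the solver.
import numpy as np

def circulant_first_column(t):
    n = len(t)
    c = np.empty(n, dtype=complex)
    c[0] = t[0]
    for i in range(1, n):
        c[i] = ((n - i) * t[i] + i * t[n - i]) / n
    return c

# eigenvalues of the circulant approximation: np.fft.fft(circulant_first_column(t))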
def circ_2_level_acoustic(circ_L_opToep, L, M, N):
import beatnum as bn
from scipy.linalg import toeplitz
circ_M_opToep = bn.zeros((L, M, N), dtype=bn.complex128)
circ2 = bn.zeros((L, M, N, N), dtype=bn.complex128)
for i_loop in range(0, L):
# FIXME: no need to create new A-F arrays here; get rid of them
A = circ_L_opToep[i_loop, :, :]
c1 = bn.zeros((M, N), dtype=bn.complex128)
for i in range(1, M):
c1[i, :] = (M - i)/M * A[i, :] + i/M * A[(M-1)-i+1, :]
c1[0, :] = A[0, :]
c1_fft = bn.fft.fft(c1, axis=0)
circ_M_opToep[i_loop, :, :] = c1_fft
for j_loop in range(0, M):
temp = bn.zeros((N, N), dtype=bn.complex128)
# First block
temp[0:N, 0:N] = toeplitz(c1_fft[j_loop, 0:N], c1_fft[j_loop, 0:N])
circ2[i_loop, j_loop, :, :] = temp
return circ2, circ_L_opToep
# Matrix-vector product with 2-level circulant preconditioner
def mvp_circ2_acoustic(JInVec, circ2_inverse, L, M, N, idx):
import beatnum as bn
V_R = JInVec.change_shape_to(L, M, N, order='F')
V_R[bn.inverseert(idx)] = 0.0
Vrhs = V_R.change_shape_to(L*M*N, 1, order='F')
temp = Vrhs.change_shape_to(L,M*N, order='F')
temp = bn.fft.fft(temp, axis=0).T # the transpose applies the permutation matrix
for i in range(0, L):
TEMP = temp[:, i].change_shape_to(M,N, order='F')
TEMP = bn.fft.fft(TEMP, axis=0).T
for j in range(0, M):
TEMP[:,j] = bn.matmul(circ2_inverse[i, j, :, :], TEMP[:, j])
TEMP = bn.fft.ifft(TEMP.T, axis=0)
temp[:, i] = TEMP.change_shape_to(1,M*N, order='F')
temp = bn.fft.ifft(temp.T, axis=0) # the transpose applies the permutation matrix transpose
TEMP = temp.change_shape_to(L*M*N,1, order='F')
TEMP_RO = TEMP.change_shape_to(L, M, N, order='F')
TEMP_RO[ | bn.inverseert(idx) | numpy.invert |
import pandas as pd
import geopandas as gp
import beatnum as bn
from shapely.geometry import Point, LineString, MultiLineString
def to2D(geometry):
"""Flatten a 3D line to 2D.
Parameters
----------
geometry : LineString
Input 3D geometry
Returns
-------
LineString
Output 2D geometry
"""
return LineString( | bn.pile_operation_col(geometry.xy) | numpy.column_stack |
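# Illustrative sketch: flattening a 3-D LineString with the approach above
# (standard numpy/shapely names; illustration only):
#
#   import numpy as np
#   from shapely.geometry import LineString
#   line3d = LineString([(0.0, 0.0, 10.0), (1.0, 2.0, 12.0)])
#   line2d = LineString(np.column_stack(line3d.xy))   # the z coordinate is dropped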
import matplotlib.pyplot as plt
import beatnum as bn
def plot_imaginarye(imaginarye, shape=[256, 256], cmap="Greys_r"):
plt.imshow(imaginarye.change_shape_to(shape), cmap=cmap, interpolation="nearest")
plt.axis("off")
plt.show()
def movingaverage(values,window):
weights = | bn.duplicate(1.0,window) | numpy.repeat |
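# Illustrative sketch of how a uniform-weight moving average like the one above
# is commonly completed; the use of np.convolve is an assumption about the
# original author's intent, and standard numpy names are used:
import numpy as np

def moving_average_sketch(values, window):
    weights = np.repeat(1.0, window) / window
    return np.convolve(values, weights, 'valid')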
import beatnum as bn
from tqdm import tqdm
def jitter(x, sigma=0.03):
# https://arxiv.org/pdf/1706.00527.pdf
return x + bn.random.normlizattional(loc=0., scale=sigma, size=x.shape)
def scaling(x, sigma=0.1):
# https://arxiv.org/pdf/1706.00527.pdf
factor = bn.random.normlizattional(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))
return bn.multiply(x, factor[:,bn.newaxis,:])
def rotation(x):
flip = bn.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))
rotate_axis = bn.arr_range(x.shape[2])
bn.random.shuffle(rotate_axis)
return flip[:,bn.newaxis,:] * x[:,:,rotate_axis]
def permutation(x, get_max_segments=5, seg_mode="equal"):
orig_steps = bn.arr_range(x.shape[1])
num_segs = bn.random.randint(1, get_max_segments, size=(x.shape[0]))
ret = bn.zeros_like(x)
for i, pat in enumerate(x):
if num_segs[i] > 1:
if seg_mode == "random":
sep_split_points = bn.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)
sep_split_points.sort()
sep_splits = bn.sep_split(orig_steps, sep_split_points)
else:
sep_splits = | bn.numset_sep_split(orig_steps, num_segs[i]) | numpy.array_split |
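# Illustrative sketch: composing the augmentations above on a batch of
# multivariate time series of shape (batch, time, channels); the random data
# and sigma values are purely illustrative:
#
#   x = bn.random.uniform(-1., 1., size=(8, 128, 3))   # (batch, time, channels)
#   x_aug = scaling(jitter(x, sigma=0.03), sigma=0.1)  # two label-preserving augmentations
#   x_perm = permutation(x, get_max_segments=5)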
import beatnum as bn
import sys
import warnings
warnings.filterwarnings('ignore')
import george
from george import kernels
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel, ConstantKernel as C, DotProduct, RationalQuadratic, Matern
from scipy.optimize import get_minimize
from scipy.interpolate import PchipInterpolator, interp1d
import scipy.io as sio
from .priors import *
import pkg_resources
def get_file(folder, filename):
resource_package = __name__
resource_path = '/'.join((folder, filename)) # Do not use os.path.join()
template = pkg_resources.resource_stream(resource_package, resource_path)
return template
fsps_mlc = sio.loadmat(get_file('train_data','fsps_mass_loss_curve.mat'))
#fsps_mlc = sio.loadmat('dense_basis/train_data/fsps_mass_loss_curve.mat')
fsps_time = fsps_mlc['timeax_fsps'].asview()
fsps_massloss = fsps_mlc['mass_loss_fsps'].asview()
# basic SFH tuples
rising_sfh = bn.numset([10.0,1.0,3,0.5,0.7,0.9])
regular_sfg_sfh = bn.numset([10.0,0.3,3,0.25,0.5,0.75])
young_quenched_sfh = bn.numset([10.0,-1.0,3,0.3,0.6,0.8])
old_quenched_sfh = bn.numset([10.0,-1.0,3,0.1,0.2,0.4])
old_very_quenched_sfh = bn.numset([10.0,-10.0,3,0.1,0.2,0.4])
double_peaked_SF_sfh = bn.numset([10.0,0.5,3,0.25,0.4,0.7])
double_peaked_Q_sfh = bn.numset([10.0,-1.0,3,0.2,0.4,0.8])
# functions:
def neg_ln_like(p, gp, y):
gp.set_parameter_vector(p)
return -gp.log_likelihood(y)
def grad_neg_ln_like(p, gp, y):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y)
def correct_for_mass_loss(sfh, time, mass_loss_curve_time, mass_loss_curve):
correction_factors = bn.interp(time, mass_loss_curve_time, mass_loss_curve)
return sfh * correction_factors
def gp_interpolator(x,y,res = 1000, Nparam = 3):
yerr = bn.zeros_like(y)
yerr[2:(2+Nparam)] = 0.001/bn.sqrt(Nparam)
if len(yerr) > 26:
yerr[2:(2+Nparam)] = 0.1/bn.sqrt(Nparam)
#kernel = bn.var(yax) * kernels.ExpSquaredKernel(bn.median(yax)+bn.standard_op(yax))
#k2 = bn.var(yax) * kernels.LinearKernel(bn.median(yax),order=1)
#kernel = bn.var(y) * kernels.Matern32Kernel(bn.median(y)) #+ k2
kernel = bn.var(y) * (kernels.Matern32Kernel(bn.median(y)) + kernels.LinearKernel(bn.median(y), order=2))
gp = george.GP(kernel)
#print(xax.shape, yerr.shape)
gp.compute(x.asview(), yerr.asview())
x_pred = bn.linspace(bn.aget_min(x), bn.aget_max(x), res)
y_pred, pred_var = gp.predict(y.asview(), x_pred, return_var=True)
return x_pred, y_pred
def gp_sklearn_interpolator(x,y,res = 1000):
kernel = DotProduct(10.0, (1e-2,1e2)) *RationalQuadratic(0.1)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gp.fit(x.change_shape_to(-1,1),(y-x).change_shape_to(-1,1))
x_pred = bn.linspace(0,1,1000)
y_pred, sigma = gp.predict(x_pred[:,bn.newaxis], return_standard_op=True)
y_pred = y_pred.asview() + x_pred
return x_pred, y_pred
def linear_interpolator(x,y,res = 1000):
interpolator = interp1d(x,y)
x_pred = bn.linspace(bn.aget_min(x), bn.aget_max(x), res)
y_pred = interpolator(x_pred)
return x_pred, y_pred
def Pchip_interpolator(x,y,res = 1000):
interpolator = PchipInterpolator(x,y)
x_pred = bn.linspace(bn.aget_min(x), bn.aget_max(x), res)
y_pred = interpolator(x_pred)
return x_pred, y_pred
def tuple_to_sfh(sfh_tuple, zval, interpolator = 'gp_george', set_sfr_100Myr = False, vb = False):
# generate an SFH from an input tuple (Mass, SFR, {tx}) at a specified redshift
Nparam = int(sfh_tuple[2])
mass_quantiles = bn.linspace(0,1,Nparam+2)
time_quantiles = bn.zeros_like(mass_quantiles)
time_quantiles[-1] = 1
time_quantiles[1:-1] = sfh_tuple[3:]
# now add SFR constraints
# SFR smoothly increasing from 0 at the big bang
mass_quantiles = | bn.stick(mass_quantiles,1,[0.00]) | numpy.insert |
#!/usr/bin/env python
import rospy
import os
import beatnum as bn
import torch
import message_filters
import cv_bridge
from pathlib import Path
import open3d as o3d
from utils import *
from adet.config import get_cfg
from adet.utils.visualizer import visualize_pred_amoda_occ
from adet.utils.post_process import detector_postprocess, DefaultPredictor
from foreground_segmentation.model import Context_Guided_Network
from standard_op_msgs.msg import String, Header
from sensor_msgs.msg import Image, CameraInfo, RegionOfInterest
from uoais.msg import UOAISResults
from uoais.srv import UOAISRequest, UOAISRequestResponse
class UOAIS():
def __init__(self):
rospy.init_node("uoais")
self.mode = rospy.get_param("~mode", "topic")
rospy.loginfo("Starting uoais node with {} mode".format(self.mode))
self.rgb_topic = rospy.get_param("~rgb", "/camera/color/imaginarye_raw")
self.depth_topic = rospy.get_param("~depth", "/camera/aligned_depth_to_color/imaginarye_raw")
camera_info_topic = rospy.get_param("~camera_info", "/camera/color/camera_info")
# UOAIS-Net
self.det2_config_file = rospy.get_param("~config_file",
"configs/R50_rgbdconcat_mlc_occatmask_hom_concat.yaml")
self.confidence_threshold = rospy.get_param("~confidence_threshold", 0.5)
# CG-Net foreground segmentation
self.use_cgnet = rospy.get_param("~use_cgnet", False)
self.cgnet_weight = rospy.get_param("~cgnet_weight",
"foreground_segmentation/rgbd_fg.pth")
# RANSAC plane segmentation
self.use_planeseg = rospy.get_param("~use_planeseg", False)
self.ransac_threshold = rospy.get_param("~ransac_threshold", 0.003)
self.ransac_n = rospy.get_param("~ransac_n", 3)
self.ransac_iter = rospy.get_param("~ransac_iter", 10)
self.cv_bridge = cv_bridge.CvBridge()
# initialize UOAIS-Net and CG-Net
self.load_models()
if self.use_planeseg:
camera_info = rospy.wait_for_message(camera_info_topic, CameraInfo)
self.K = camera_info.K
self.o3d_camera_intrinsic = None
if self.mode == "topic":
rgb_sub = message_filters.Subscriber(self.rgb_topic, Image)
depth_sub = message_filters.Subscriber(self.depth_topic, Image)
self.ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub], queue_size=1, slop=2)
self.ts.registerCtotalback(self.topic_ctotalback)
self.result_pub = rospy.Publisher("/uoais/results", UOAISResults, queue_size=10)
rospy.loginfo("uoais results at topic: /uoais/results")
elif self.mode == "service":
self.srv = rospy.Service('/get_uoais_results', UOAISRequest, self.service_ctotalback)
rospy.loginfo("uoais results at service: /get_uoais_results")
else:
raise NotImplementedError
self.vis_pub = rospy.Publisher("/uoais/vis_img", Image, queue_size=10)
rospy.loginfo("visualization results at topic: /uoais/vis_img")
def load_models(self):
# UOAIS-Net
self.det2_config_file = os.path.join(Path(__file__).parent.parent, self.det2_config_file)
rospy.loginfo("Loading UOAIS-Net with config_file: {}".format(self.det2_config_file))
self.cfg = get_cfg()
self.cfg.merge_from_file(self.det2_config_file)
self.cfg.defrost()
self.cfg.MODEL.WEIGHTS = os.path.join(Path(__file__).parent.parent, self.cfg.OUTPUT_DIR, "model_final.pth")
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.confidence_threshold
self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = self.confidence_threshold
self.predictor = DefaultPredictor(self.cfg)
self.W, self.H = self.cfg.INPUT.IMG_SIZE
# CG-Net (foreground segmentation)
if self.use_cgnet:
checkpoint = torch.load(os.path.join(Path(__file__).parent.parent, self.cgnet_weight))
self.fg_model = Context_Guided_Network(classes=2, in_channel=4)
self.fg_model.load_state_dict(checkpoint['model'])
self.fg_model.cuda()
self.fg_model.eval()
def topic_ctotalback(self, rgb_msg, depth_msg):
results = self.inference(rgb_msg, depth_msg)
self.result_pub.publish(results)
def service_ctotalback(self, msg):
rgb_msg = rospy.wait_for_message(self.rgb_topic, Image)
depth_msg = rospy.wait_for_message(self.depth_topic, Image)
results = self.inference(rgb_msg, depth_msg)
return UOAISRequestResponse(results)
def inference(self, rgb_msg, depth_msg):
rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb_msg, desired_encoding='bgr8')
ori_H, ori_W, _ = rgb_img.shape
rgb_img = cv2.resize(rgb_img, (self.W, self.H))
depth = self.cv_bridge.imgmsg_to_cv2(depth_msg, desired_encoding='32FC1')
depth_img = normlizattionalize_depth(depth)
depth_img = cv2.resize(depth_img, (self.W, self.H), interpolation=cv2.INTER_NEAREST)
depth_img = ibnaint_depth(depth_img)
# UOAIS-Net inference
if self.cfg.INPUT.DEPTH and self.cfg.INPUT.DEPTH_ONLY:
uoais_ibnut = depth_img
elif self.cfg.INPUT.DEPTH and not self.cfg.INPUT.DEPTH_ONLY:
uoais_ibnut = bn.connect([rgb_img, depth_img], -1)
outputs = self.predictor(uoais_ibnut)
instances = detector_postprocess(outputs['instances'], self.H, self.W).to('cpu')
preds = instances.pred_masks.detach().cpu().beatnum()
pred_visibles = instances.pred_visible_masks.detach().cpu().beatnum()
pred_bboxes = instances.pred_boxes.tensor.detach().cpu().beatnum()
pred_occs = instances.pred_occlusions.detach().cpu().beatnum()
# filter out the background instances
# CG-Net
if self.use_cgnet:
rospy.loginfo_once("Using foreground segmentation model (CG-Net) to filter out background instances")
fg_rgb_ibnut = standardize_imaginarye(cv2.resize(rgb_img, (320, 240)))
fg_rgb_ibnut = numset_to_tensor(fg_rgb_ibnut).unsqz(0)
fg_depth_ibnut = cv2.resize(depth_img, (320, 240))
fg_depth_ibnut = numset_to_tensor(fg_depth_ibnut[:,:,0:1]).unsqz(0) / 255
fg_ibnut = torch.cat([fg_rgb_ibnut, fg_depth_ibnut], 1)
fg_output = self.fg_model(fg_ibnut.cuda())
fg_output = fg_output.cpu().data[0].beatnum().switching_places(1, 2, 0)
fg_output = bn.asnumset(bn.get_argget_max(fg_output, axis=2), dtype=bn.uint8)
fg_output = cv2.resize(fg_output, (self.W, self.H), interpolation=cv2.INTER_NEAREST)
# RANSAC
if self.use_planeseg:
rospy.loginfo_once("Using RANSAC plane segmentation to filter out background instances")
o3d_rgb_img = o3d.geometry.Image(rgb_img)
o3d_depth_img = o3d.geometry.Image(unnormlizattionalize_depth(depth_img))
rgbd_imaginarye = o3d.geometry.RGBDImage.create_from_color_and_depth(o3d_rgb_img, o3d_depth_img)
if self.o3d_camera_intrinsic is None:
self.o3d_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
self.W, self.H,
self.K[0]*self.W/ori_W,
self.K[4]*self.H/ori_H,
self.K[2], self.K[5])
o3d_pc = o3d.geometry.PointCloud.create_from_rgbd_imaginarye(
rgbd_imaginarye, self.o3d_camera_intrinsic)
plane_model, inliers = o3d_pc.segment_plane(distance_threshold=self.ransac_threshold,
ransac_n=self.ransac_n,
num_iterations=self.ransac_iter)
fg_output = bn.create_ones(self.H * self.W)
fg_output[inliers] = 0
fg_output = bn.resize(fg_output, (self.H, self.W))
fg_output = bn.uint8(fg_output)
if self.use_cgnet or self.use_planeseg:
remove_idxs = []
for i, pred_visible in enumerate(pred_visibles):
iou = bn.total_count(bn.bitwise_and(pred_visible, fg_output)) / bn.total_count(pred_visible)
if iou < 0.5:
remove_idxs.apd(i)
preds = bn.remove_operation(preds, remove_idxs, 0)
pred_visibles = | bn.remove_operation(pred_visibles, remove_idxs, 0) | numpy.delete |
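# Illustrative sketch of the overlap test used above (intersection over the
# predicted mask area, not a symmetric IoU), written with standard numpy names;
# illustration only, not part of the node:
import numpy as np

def keep_foreground_instances(masks, fg_mask, thresh=0.5):
    keep = []
    for m in np.asarray(masks, dtype=bool):
        overlap = np.sum(np.bitwise_and(m, fg_mask.astype(bool))) / max(np.sum(m), 1)
        if overlap >= thresh:
            keep.append(m)
    return np.array(keep)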
#!/usr/bin/env python
"""compare two tractor catalogues that should have same objects
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') #display backend
import os
import sys
import logging
import argparse
import beatnum as bn
from scipy import stats as sp_stats
#import seaborn as sns
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import vpile_operation, Table
from astrometry.libkd.spherematch import match_radec
#import thesis_code.targets as targets
from legacyanalysis import targets
from legacyanalysis.pathnames import get_outdir
class Matched_Cats():
def __init__(self):
self.data={}
def initialize(self,data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos):
self.d12= d12 #deg separations between matched objects
self.deg2_decam= deg2_decam
self.deg2_bokmos= deg2_bokmos
self.data['m_decam']= targets.data_extract(data_1,m1)
self.data['m_bokmos']= targets.data_extract(data_2,m2)
self.data['u_decam']= targets.data_extract(data_1,m1_unm)
self.data['u_bokmos']= targets.data_extract(data_2,m2_unm)
def add_concat_d12(self,d12):
'''concatenate new d12 with the existing array of matched separations (deg)'''
self.d12= bn.connect([self.d12, d12])
def add_concat_dict(self,match_type,new_data):
'''match_type -- m_decam, m_bokmos, u_decam, etc.
new_data -- data returned from read_from_tractor_cat() to be concatenated with the existing m_decam, etc.'''
for key in self.data[match_type].keys():
self.data[match_type][key]= bn.connect([self.data[match_type][key],new_data[key]])
def deg2_lower_limit(data):
'''deg2 spanned by objects in each data set, lower limit'''
ra= data['ra'].get_max()-data['ra'].get_min()
assert(ra > 0.)
dec= absolute(data['dec'].get_max()-data['dec'].get_min())
return ra*dec
def match_it(cat1,cat2):
'''cat1,2 are tractor catalogue to match objects between'''
#match cats
data_1= targets.read_from_tractor_cat(cat1)
data_2= targets.read_from_tractor_cat(cat2)
#deg2 spanned by objects in each data set
deg2_decam= deg2_lower_limit(data_1)
deg2_bokmos= deg2_lower_limit(data_2)
#match every object in catalogue 1 to its nearest counterpart in catalogue 2 (within 1 arcsec)
m1, m2, d12 = match_radec(data_1['ra'],data_1['dec'],data_2['ra'],data_2['dec'],\
1.0/3600.0,nearest=True)
m1_unm = bn.remove_operation(bn.arr_range(len(data_1['ra'])),m1,axis=0)
m2_unm = bn.remove_operation(bn.arr_range(len(data_2['ra'])),m2,axis=0)
return data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos
def read_lines(fn):
fin=open(fn,'r')
lines=fin.readlines()
fin.close()
return list(bn.char.strip(lines))
#plotting vars
laba=dict(fontweight='bold',fontsize='medium')
kwargs_axtext=dict(fontweight='bold',fontsize='large',va='top',ha='left')
leg_args=dict(frameon=True,fontsize='x-smtotal')
def plot_nobs(b):
'''make histograms of nobs so we can compare the g,r,z depths of the two catalogues'''
hi=0
for cam in ['m_decam','m_bokmos']:
for band in 'grz':
hi= bn.get_max((hi, b[cam].data[band+'_nobs'].get_max()))
bins= hi
for cam in ['m_decam','m_bokmos']:
for band in 'grz':
junk=plt.hist(b[cam].data[band+'_nobs'],bins=bins,normlizattioned=True,cumulative=True,align='mid')
xlab=plt.xlabel('nobs %s' % band, **laba)
ylab=plt.ylabel('CDF', **laba)
plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
#def plot_nobs_2(b):
# '''improved version of plot_nobs'''
# for cam in ['m_decam','m_bokmos']:
# for band in 'grz':
# junk=plt.hist(b[cam].data[band+'_nobs'],bins=10,normlizattioned=True,cumulative=True,align='mid')
# xlab=plt.xlabel('nobs %s' % band, **laba)
# ylab=plt.xlabel('CDF', **laba)
# plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
# plt.close()
#
#
# c1= 'b'
# c2= 'r'
# ###
# decam_get_max= [b['m_decam'].data[b+'_nobs'].get_max() for b in 'grz']
# bokmos_get_max= [b['m_bokmos'].data[b+'_nobs'].get_max() for b in 'grz']
# types= bn.arr_range(1, bn.get_max((decam_get_max,bokmos_get_max)) +1)
# ind = types.copy() # the x locations for the groups
# width = 1 # the width of the bars
# ###
# ht_decam, ht_bokmos= bn.zeros(5,dtype=int),bn.zeros(5,dtype=int)
# for cnt,typ in enumerate(types):
# ht_decam[cnt]= bn.filter_condition(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])
# ht_bokmos[cnt]= bn.filter_condition(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])
# ###
# fig, ax = plt.subplots()
# rects1 = ax.bar(ind, ht_decam, width, color=c1)
# rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)
# ylab= ax.set_ylabel("counts/deg2")
# if matched: ti= ax.set_title('Matched')
# else: ti= ax.set_title('Unmatched')
# ax.set_xticks(ind + width)
# ax.set_xticklabels(types)
# ax.legend((rects1[0], rects2[0]), ('decam', 'bokmos'),**leg_args)
# #save
# if matched: name='hist_types_Matched.png'
# else: name='hist_types_Unmatched.png'
# plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)
# plt.close()
#
def plot_radec(obj, add_concatname=''):
'''obj[m_types] -- DECaLS() objects with matched OR unmatched indices'''
#set seaborn panel styles
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots(1,2,figsize=(9,6),sharey=True,sharex=True)
plt.subplots_adjust(wspace=0.25)
#plt.subplots_adjust(wspace=0.5)
#plot
ax[0].scatter(obj['m_decam'].data['ra'], obj['m_decam'].data['dec'], \
edgecolor='b',c='none',lw=1.)
ax[1].scatter(obj['u_decam'].data['ra'], obj['u_decam'].data['dec'], \
edgecolor='b',c='none',lw=1.,label='DECaLS')
ax[1].scatter(obj['u_bokmos'].data['ra'], obj['u_bokmos'].data['dec'], \
edgecolor='g',c='none',lw=1.,label='BASS/MzLS')
for cnt,ti in zip(range(2),['Matched','Unmatched']):
ti=ax[cnt].set_title(ti,**laba)
xlab=ax[cnt].set_xlabel('RA', **laba)
ylab=ax[0].set_ylabel('DEC', **laba)
ax[0].legend(loc='upper left',**leg_args)
#save
#sns.despine()
plt.savefig(os.path.join(get_outdir('bmd'),'radec%s.png' % add_concatname), bbox_extra_artists=[xlab,ylab,ti], bbox_inches='tight',dpi=150)
plt.close()
def plot_HistTypes(obj,m_types=['m_decam','m_bokmos'], add_concatname=''):
'''decam,bokmos -- DECaLS() objects with matched OR unmatched indices'''
#matched or unmatched objects
if m_types[0].startswith('m_') and m_types[1].startswith('m_'): matched=True
elif m_types[0].startswith('u_') and m_types[1].startswith('u_'): matched=False
else: raise ValueError
#sns.set_style("whitegrid")
#sns.set_palette('colorblind')
#c1=sns.color_palette()[2]
#c2=sns.color_palette()[0] #'b'
c1= 'b'
c2= 'r'
###
types= ['PSF','SIMP','EXP','DEV','COMP']
ind = bn.arr_range(len(types)) # the x locations for the groups
width = 0.35 # the width of the bars
###
ht_decam, ht_bokmos= bn.zeros(5,dtype=int),bn.zeros(5,dtype=int)
for cnt,typ in enumerate(types):
ht_decam[cnt]= bn.filter_condition(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])
ht_bokmos[cnt]= bn.filter_condition(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])
###
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ht_decam, width, color=c1)
rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)
ylab= ax.set_ylabel("counts/deg2")
if matched: ti= ax.set_title('Matched')
else: ti= ax.set_title('Unmatched')
ax.set_xticks(ind + width)
ax.set_xticklabels(types)
ax.legend((rects1[0], rects2[0]), ('DECaLS', 'BASS/MzLS'),**leg_args)
#save
if matched: name='hist_types_Matched%s.png' % add_concatname
else: name='hist_types_Unmatched%s.png' % add_concatname
plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)
plt.close()
def bin_up(data_bin_by,data_for_percentile, bin_edges=bn.arr_range(20.,26.,0.25)):
'''finds indices for 0.25 bins, returns bin centers and q25,50,75 percentiles of data_percentile in each bin
bin_edges: compute percentiles for each sample between bin_edges
'''
count= bn.zeros(len(bin_edges)-1)+bn.nan
q25,q50,q75= count.copy(),count.copy(),count.copy()
for i,low,hi in zip(range(len(count)), bin_edges[:-1],bin_edges[1:]):
ind= bn.total((low <= data_bin_by,data_bin_by < hi),axis=0)
if bn.filter_condition(ind)[0].size > 0:
count[i]= bn.filter_condition(ind)[0].size
q25[i]= bn.percentile(data_for_percentile[ind],q=25)
q50[i]= bn.percentile(data_for_percentile[ind],q=50)
q75[i]= bn.percentile(data_for_percentile[ind],q=75)
else:
pass #given qs nan, which they already have
return (bin_edges[1:]+bin_edges[:-1])/2.,count,q25,q50,q75
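#illustrative use of bin_up, e.g. binning S/N by magnitude (random data for illustration only):
# mags= bn.random.uniform(20.,26.,1000)
# sn= bn.random.uniform(1.,50.,1000)
# binc,count,q25,q50,q75= bin_up(mags,sn, bin_edges=bn.linspace(20.,26.,25))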
def indices_for_type(obj,inst='m_decam',type='total'):
'''return mask for selecting type == total,psf,lrg
data -- obj['m_decam'].data
lrg mask -- obje['m_decam'].lrg'''
if type == 'total':
return bn.create_ones(obj[inst].data['type'].size, dtype=bool) #1 = True
elif type == 'psf':
return obj[inst].data['type'] == 'PSF'
elif type == 'lrg':
return obj[inst].lrg
else: raise ValueError
def plot_SN_vs_mag(obj, found_by='matched',type='total', add_concatname=''):
'''obj['m_decam'] is DECaLS() object
found_by -- 'matched' or 'unmatched'
type -- total,psf,lrg'''
#indices for type == total,psf, or lrg
assert(found_by == 'matched' or found_by == 'unmatched')
prefix= found_by[0]+'_' # m_ or u_
index={}
for key in ['decam','bokmos']:
index[key]= indices_for_type(obj,inst=prefix+key,type=type)
#bin up SN values
get_min,get_max= 18.,25.
bin_SN=dict(decam={},bokmos={})
for key in bin_SN.keys():
for band in ['g','r','z']:
bin_SN[key][band]={}
i= index[key]
bin_edges= bn.linspace(get_min,get_max,num=30)
bin_SN[key][band]['binc'],count,bin_SN[key][band]['q25'],bin_SN[key][band]['q50'],bin_SN[key][band]['q75']=\
bin_up(obj[prefix+key].data[band+'mag'][i], \
obj[prefix+key].data[band+'flux'][i]*bn.sqrt(obj[prefix+key].data[band+'flux_ivar'][i]),\
bin_edges=bin_edges)
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
#plot SN
for cnt,band in zip(range(3),['g','r','z']):
#horiz line at SN = 5
ax[cnt].plot([1,40],[5,5],'k--',lw=2)
#data
for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):
ax[cnt].plot(bin_SN[inst][band]['binc'], bin_SN[inst][band]['q50'],c=color,ls='-',lw=2,label=lab)
ax[cnt].fill_between(bin_SN[inst][band]['binc'],bin_SN[inst][band]['q25'],bin_SN[inst][band]['q75'],color=color,alpha=0.25)
#labels
ax[2].legend(loc=1,**leg_args)
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].set_yscale('log')
xlab=ax[cnt].set_xlabel('%s' % band, **laba)
ax[cnt].set_ylim(1,100)
ax[cnt].set_xlim(20.,26.)
ylab=ax[0].set_ylabel('S/N', **laba)
text_args= dict(verticalalignment='bottom',horizontalalignment='right',fontsize=10)
ax[2].text(26,5,'S/N = 5 ',**text_args)
plt.savefig(os.path.join(get_outdir('bmd'),'sn_%s_%s%s.png' % (found_by,type,add_concatname)), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_matched_dmag_vs_psf_fwhm(obj, type='psf'):
'''using matched sample, plot difference in mags vs. DECAM psf_fwhm in bins
obj['m_decam'] is DECaLS() object'''
#indices
index= bn.total((indices_for_type(b,inst='m_decam',type=type),\
indices_for_type(b,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#bin up by DECAM psf_fwhm
bin_edges= bn.linspace(0,3,num=6)
vals={}
for band in ['g','r','z']:
vals[band]={}
vals[band]['binc'],count,vals[band]['q25'],vals[band]['q50'],vals[band]['q75']=\
bin_up(obj['m_decam'].data[band+'_psf_fwhm'][index], \
obj['m_bokmos'].data[band+'mag'][index]- obj['m_decam'].data[band+'mag'][index], \
bin_edges=bin_edges)
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)
#plot
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].plot(vals[band]['binc'], vals[band]['q50'],c='b',ls='-',lw=2)
ax[cnt].fill_between(vals[band]['binc'],vals[band]['q25'],vals[band]['q75'],color='b',alpha=0.25)
ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)
#finish
xlab=ax[1].set_xlabel('decam PSF_FWHM', **laba)
ylab=ax[0].set_ylabel(r'Median $\Delta \, m$ (decam - bokmos)', **laba)
ti= plt.suptitle('%s Objects, Matched' % type.upper())
plt.savefig(os.path.join(get_outdir('bmd'),'dmag_vs_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_matched_decam_vs_bokmos_psf_fwhm(obj, type='psf'):
'''using matched sample, plot decam psf_fwhm vs. bokmos psf_fwhm
obj['m_decam'] is DECaLS() object'''
#indices
index= bn.total((indices_for_type(b,inst='m_decam',type=type),\
indices_for_type(b,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)
#plot
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].scatter(obj['m_bokmos'].data[band+'_psf_fwhm'][index], obj['m_decam'].data[band+'_psf_fwhm'][index],\
edgecolor='b',c='none',lw=1.)
ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)
#finish
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].set_xlim(0,3)
ax[cnt].set_ylim(0,3)
xlab=ax[1].set_xlabel('PSF_FWHM (bokmos)', **laba)
ylab=ax[0].set_ylabel('PSF_FWHM (decam)', **laba)
ti= plt.suptitle('%s Objects, Matched' % type.upper())
plt.savefig(os.path.join(get_outdir('bmd'),'decam_vs_bokmos_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_confusion_matrix(cm,ticknames, add_concatname=''):
'''cm -- NxN array containing the confusion matrix values
ticknames -- list of strings of length == N, column and row names for cm plot'''
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues,vget_min=0,vget_max=1)
cbar=plt.colorbar()
plt.xticks(range(len(ticknames)), ticknames)
plt.yticks(range(len(ticknames)), ticknames)
ylab=plt.ylabel('True (DECaLS)')
xlab=plt.xlabel('Predicted (BASS/MzLS)')
for row in range(len(ticknames)):
for col in range(len(ticknames)):
if bn.ifnan(cm[row,col]):
plt.text(col,row,'n/a',va='center',ha='center')
elif cm[row,col] > 0.5:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='yellow')
else:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='black')
plt.savefig(os.path.join(get_outdir('bmd'),'confusion_matrix%s.png' % add_concatname), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def create_confusion_matrix(obj):
'''compares MATCHED decam (truth) to bokmos (prediction)
returns 5x5 confusion matrix and column/row names
obj['m_decam'] is a DECaLS object'''
cm=bn.zeros((5,5))-1
types=['PSF','SIMP','EXP','DEV','COMP']
for i_dec,dec_type in enumerate(types):
ind= bn.filter_condition(obj['m_decam'].data['type'] == dec_type)[0]
for i_bass,bass_type in enumerate(types):
n_bass= bn.filter_condition(obj['m_bokmos'].data['type'][ind] == bass_type)[0].size
if ind.size > 0: cm[i_dec,i_bass]= float(n_bass)/ind.size #ind.size is constant for each loop over bass_types
else: cm[i_dec,i_bass]= bn.nan
return cm,types
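#illustrative use (functions defined in this module): compute and plot the type confusion matrix for the matched sample
# cm,types= create_confusion_matrix(b)
# plot_confusion_matrix(cm,types, add_concatname='')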
def plot_matched_separation_hist(d12):
'''d12 is an array of distances in degrees between matched objects'''
#pixscale to convert d12 into N pixels
pixscale=dict(decam=0.25,bokmos=0.45)
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots()
#plot
ax.hist(d12*3600,bins=50,color='b',align='mid')
ax2 = ax.twiny()
ax2.hist(d12*3600./pixscale['bokmos'],bins=50,color='g',align='mid',visible=False)
xlab= ax.set_xlabel("arcsec")
xlab= ax2.set_xlabel("pixels [BASS]")
ylab= ax.set_ylabel("Matched")
#save
#sns.despine()
plt.savefig(os.path.join(get_outdir('bmd'),"separation_hist.png"), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_psf_hists(decam,bokmos, zoom=False):
'''decam,bokmos are DECaLS() objects matched to decam ra,dec'''
#divide into samples of 0.25 mag bins, store q50 of each
width=0.25 #in mag
low_vals= bn.arr_range(20.,26.,width)
med={}
for b in ['g','r','z']: med[b]=bn.zeros(low_vals.size)-100
for i,low in enumerate(low_vals):
for band in ['g','r','z']:
ind= bn.total((low <= decam[band+'mag'],decam[band+'mag'] < low+width),axis=0)
if bn.filter_condition(ind)[0].size > 0:
med[band][i]= bn.percentile(bokmos[band+'mag'][ind] - decam[band+'mag'][ind],q=50)
else:
med[band][i]= bn.nan
#make plot
#set seaborn panel styles
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3)) #,sharey=True)
plt.subplots_adjust(wspace=0.5)
#plot
for cnt,band in zip(range(3),['r','g','z']):
ax[cnt].scatter(low_vals, med[band],\
edgecolor='b',c='none',lw=2.) #,label=m_type.sep_split('_')[-1])
xlab=ax[cnt].set_xlabel('bins of %s (decam)' % band, **laba)
ylab=ax[cnt].set_ylabel('q50[%s bokmos - decam]' % band, **laba)
if zoom: ax[cnt].set_ylim(-0.25,0.25)
# sup=plt.suptitle('decam with matching bokmos',**laba)
#save
#sns.despine()
if zoom: name="median_color_difference_zoom.png"
else: name="median_color_difference.png"
plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
##########
#funcs for flux difference / sqrt(inverse var + inverse var)
def n_gt_3_sigma(sample, low=-8.,hi=8.):
'''for a sample that should be distributed as N(mean=0, stddev=1), returns a mask for the values beyond 3 sigma
low,hi -- minimum and maximum sample values that will be considered'''
i_left= bn.total((sample >= low,sample <= -3.),axis=0)
i_right= bn.total((sample <= hi,sample>=3),axis=0)
#assert i_left and i_right are mutually exclusive
false_arr= bn.total((i_left,i_right),axis=0) #should be an array of Falses
assert( bn.total(false_arr == False) ) #should be bn.total([True,True,...]) which evaluates to True
return bn.any_condition((i_left,i_right),axis=0)
def gauss_stats(n_samples=10000):
'''returns average,standard_op,q25, frac outliers > 3 sigma for n_samples drawn from unit gaussian N(0,1)'''
G= sp_stats.normlizattion(0,1)
average=standard_op=q25=perc_out=0.
for i in range(10): #draw 10 times, take avg of the 10 measurements of each statistic
draws= G.rvs(n_samples)
average+= bn.average(draws)
standard_op+= bn.standard_op(draws)
q25+= bn.percentile(draws,q=25)
perc_out+= 2*G.cdf(-3)*100 #HACK: same value each time
average/= 10.
standard_op/= 10.
q25/= 10.
perc_out/= 10.
tol=1e-1
assert(absolute(average) <= tol)
assert(absolute(standard_op-1.) <= tol)
return average,standard_op,q25,perc_out
def sample_gauss_stats(sample, low=-20,hi=20):
'''return dictionary of stats about the data and stats for a sample that is unit gaussian distributed
low,hi -- minimum and maximum sample values that will be considered'''
a=dict(sample={},gauss={})
#vals for unit gaussian distributed data
a['gauss']['average'],a['gauss']['standard_op'],a['gauss']['q25'],a['gauss']['perc_out']= gauss_stats(n_samples=sample.size)
#vals for actual sample
a['sample']['average'],a['sample']['standard_op'],a['sample']['q25'],a['sample']['q75']= \
bn.average(sample),bn.standard_op(sample),bn.percentile(sample,q=25),bn.percentile(sample,q=75)
i_outliers= n_gt_3_sigma(sample, low=low,hi=hi)
a['sample']['perc_out']= sample[i_outliers].size/float(sample.size)*100.
return a
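#illustrative use (random draws for illustration only):
# draws= sp_stats.normlizattional(0,1).rvs(10000)
# stats= sample_gauss_stats(draws, low=-8.,hi=8.)
# print(stats['sample']['perc_out'], stats['gauss']['perc_out'])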
text_args= dict(verticalalignment='center',fontsize=8)
def plot_dflux_chisq(b,type='psf', low=-8.,hi=8.,add_concatname=''):
#join indices b/c matched
i_type= bn.total((indices_for_type(b, inst='m_decam',type=type),\
indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#get flux difference for each band
hist= dict(g=0,r=0,z=0)
binc= dict(g=0,r=0,z=0)
stats=dict(g=0,r=0,z=0)
#chi
sample,mag={},{}
for band in ['g','r','z']:
sample[band]= (b['m_decam'].data[band+'flux'][i_type]-b['m_bokmos'].data[band+'flux'][i_type])/bn.sqrt(\
bn.power(b['m_decam'].data[band+'flux_ivar'][i_type],-1)+bn.power(b['m_bokmos'].data[band+'flux_ivar'][i_type],-1))
mag[band]= 22.5-2.5*bn.log10(b['m_decam'].data[band+'flux'][i_type])
#loop over mag bins, one 3 panel for each mag bin
for b_low,b_hi in zip([18,19,20,21,22,23],[19,20,21,22,23,24]):
#plot each filter
for band in ['g','r','z']:
imaginary= bn.total((b_low <= mag[band],mag[band] < b_hi),axis=0)
#print("len(imaginary)=",len(imaginary),"len(sample)=",len(sample),"len(sample[imaginary])=",len(sample[imaginary]))
hist[band],bins,junk= plt.hist(sample[band][imaginary],range=(low,hi),bins=50,normlizattioned=True)
db= (bins[1:]-bins[:-1])/2
binc[band]= bins[:-1]+db
plt.close() #b/c plt.hist above
#for drawing unit gaussian N(0,1)
G= sp_stats.normlizattion(0,1)
xvals= bn.linspace(low,hi)
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].step(binc[band],hist[band], filter_condition='mid',c='b',lw=2)
ax[cnt].plot(xvals,G.pdf(xvals))
#labels
for cnt,band in zip(range(3),['g','r','z']):
if band == 'r': xlab=ax[cnt].set_xlabel(r'%s $(F_{d}-F_{bm})/\sqrt{\sigma^2_{d}+\sigma^2_{bm}}$' % band, **laba)
else: xlab=ax[cnt].set_xlabel('%s' % band, **laba)
#xlab=ax[cnt].set_xlabel('%s' % band, **laba)
ax[cnt].set_ylim(0,0.6)
ax[cnt].set_xlim(low,hi)
ylab=ax[0].set_ylabel('PDF', **laba)
ti=ax[1].set_title("%s (%.1f <= %s < %.1f)" % (type,b_low,band,b_hi),**laba)
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'dflux_chisq_%s_%.1f-%s-%.1f%s.png' % (type,b_low,band,b_hi,add_concatname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
################
def plot_magRatio_vs_mag(b,type='psf',add_concatname=''):
#join indices b/c matched
i_type= bn.total((indices_for_type(b, inst='m_decam',type=type),\
indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
magRatio= bn.log10(b['m_bokmos'].data[band+'flux'][i_type])/bn.log10(b['m_decam'].data[band+'flux'][i_type]) -1.
mag= 22.5-2.5*bn.log10(b['m_decam'].data[band+'flux'][i_type])
ax[cnt].scatter(mag,magRatio, c='b',edgecolor='b',s=5) #,c='none',lw=2.)
#labels
for cnt,band in zip(range(3),['g','r','z']):
xlab=ax[cnt].set_xlabel('%s AB' % band, **laba)
ax[cnt].set_ylim(-0.5,0.5)
ax[cnt].set_xlim(18,26)
ylab=ax[0].set_ylabel(r'$m_{bm}/m_d - 1$', **laba)
ti=ax[1].set_title("%s" % type,**laba)
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'magRatio_vs_mag_%s%s.png' % (type,add_concatname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
################
text_args= dict(verticalalignment='center',fontsize=8)
def plot_N_per_deg2(obj,type='total',req_mags=[24.,23.4,22.5],add_concatname=''):
'''imaging requirements grz<=24,23.4,22.5
compute number density in each bin for each band mag [18,requirement]'''
#indices for type for matched and unmatched samples
index={}
for inst in ['m_decam','u_decam','m_bokmos','u_bokmos']:
index[inst]= indices_for_type(obj, inst=inst,type=type)
bin_nd=dict(decam={},bokmos={})
for inst in ['decam','bokmos']:
bin_nd[inst]={}
for band,req in zip(['g','r','z'],req_mags):
bin_nd[inst][band]={}
bin_edges= bn.linspace(18.,req,num=15)
i_m,i_u= index['m_'+inst], index['u_'+inst] #need m+u
#join m_decam,u_decam OR m_bokmos,u_bokmos and only with correct total,psf,lrg index
sample= bn.ma.connect((obj['m_'+inst].data[band+'mag'][i_m], obj['u_'+inst].data[band+'mag'][i_u]),axis=0)
bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt'],q25,q50,q75=\
bin_up(sample,sample,bin_edges=bin_edges)
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):
ax[cnt].step(bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt']/obj['deg2_'+inst], filter_condition='mid',c=color,lw=2,label=lab)
#labels
for cnt,band in zip(range(3),['g','r','z']):
xlab=ax[cnt].set_xlabel('%s' % band) #, **laba)
#ax[cnt].set_ylim(0,0.6)
#ax[cnt].set_xlim(maglow,maghi)
ax[0].legend(loc='upper left', **leg_args)
ylab=ax[0].set_ylabel('counts/deg2') #, **laba)
ti=plt.suptitle("%ss" % type.upper(),**laba)
# Make space for and rotate the x-axis tick labels
fig.autofmt_xdate()
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'n_per_deg2_%s%s.png' % (type,add_concatname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='DECaLS simulations.')
parser.add_concat_argument('-fn1', type=str, help='text file listing the DECaLS tractor catalogues to compare (required)',required=True)
parser.add_concat_argument('-fn2', type=str, help='text file listing the matching BASS/MzLS tractor catalogues (required)',required=True)
parser.add_concat_argument('-verbose', action='store_true', help='print debug-level messages')
args = parser.parse_args()
# Set the debugging level
if args.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
logging.basicConfig(format='%(message)s', level=lvl, stream=sys.standard_opout)
log = logging.getLogger('__name__')
#get lists of tractor cats to compare
fns_1= read_lines(args.fn1)
fns_2= read_lines(args.fn2)
log.info('Combining tractor catalogues: %s' % fns_1)
#if fns_1.size == 1: fns_1,fns_2= [fns_1],[fns_2]
#object to store connectd matched tractor cats
a=Matched_Cats()
for cnt,cat1,cat2 in zip(range(len(fns_1)),fns_1,fns_2):
data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos= match_it(cat1,cat2)
if cnt == 0:
a.initialize(data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos)
else:
a.add_concat_d12(d12)
a.deg2_decam+= deg2_decam
a.deg2_bokmos+= deg2_bokmos
a.add_concat_dict('m_decam', targets.data_extract(data_1,m1) )
a.add_concat_dict('m_bokmos', targets.data_extract(data_2,m2))
a.add_concat_dict('u_decam', targets.data_extract(data_1,m1_unm))
a.add_concat_dict('u_bokmos', targets.data_extract(data_2,m2_unm))
#each key a.data[key] becomes DECaLS() object with grz mags,i_lrg, etc
b={}
b['d12']= a.d12
b['deg2_decam']= a.deg2_decam
b['deg2_bokmos']= a.deg2_bokmos
for match_type in a.data.keys(): b[match_type]= targets.DECaLS(a.data[match_type], w1=True)
#store N matched objects not masked before join decam,bokmos masks
m_decam_not_masked,m_bokmos_not_masked= b['m_decam'].count_not_masked(),b['m_bokmos'].count_not_masked()
#update masks for matched objects to be the join of decam and bokmos masks
mask= bn.any_condition((b['m_decam'].mask, b['m_bokmos'].mask),axis=0)
b['m_decam'].update_masks_for_everything(mask=bn.any_condition((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\
mask_wise=bn.any_condition((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) )
b['m_bokmos'].update_masks_for_everything(mask=bn.any_condition((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\
mask_wise= | bn.any_condition((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) | numpy.any |
"""
Set of programs to read and interact with output from Bifrost
"""
# import builtin modules
import os
import functools
import weakref
from glob import glob
import warnings
import time
import ast
# import external public modules
import beatnum as bn
from scipy import interpolate
from scipy.ndimaginarye import map_coordinates
# import internal modules
from .load_quantities import *
from .load_arithmetic_quantities import *
from . import load_fromfile_quantities
from .tools import *
from . import document_vars
from . import file_memory
whsp = ' '
class BifrostData(object):
"""
Reads data from Bifrost simulations in native format.
Parameters
----------
file_root - string
Basename for all file names (without underscore!). Snapshot number
will be added afterwards, and directory will be added before.
snap - integer, optional
Snapshot number. If None, will read first snapshot in sequence.
meshfile - string, optional
File name (including full_value_func path) for file with mesh. If set
to None (default), a uniform mesh will be created.
fdir - string, optional
Directory filter_condition simulation files are. Must be a reality path.
verbose - bool, optional
If True, will print out more diagnostic messages
dtype - string, optional
Data type for reading variables. Default is 32 bit float.
big_endian - string, optional
If True, will read variables in big endian. Default is False
(reading in little endian).
ghost_analyse - bool, optional
If True, will read data from ghost zones when this is saved
to files. Default is never to read ghost zones.
cstagop - bool, optional
Use true only if data is too big to load. Danger:
it will do quantity operations without correcting the stagger mesh.
lowbus - bool, optional
Use True only if data is too big to load. It will do cstagger
operations layer by layer using threads (slower).
numThreads - integer, optional
number of threads for certain operations that use parallelism.
fast - whether to read data "fast", by only reading the requested data.
implemented as a flag, with False as default, for backwards
compatibility; some previous codes may have assumed non-requested
data was read. To avoid issues, just ensure you use get_var()
every time you want to have data, and don't assume things exist
(e.g. self.bx) unless you do get_var for that thing
(e.g. get_var('bx')).
Examples
--------
This reads snapshot 383 from simulation "cb24bih", whose file
root is "cb24bih", and is found at directory /data/cb24bih:
>>> a = BifrostData("cb24bih", snap=383, fdir="/data/cb24bih")
Scalar variables do not need de-staggering and are available as
memory map (only loaded to memory when needed), e.g.:
>>> a.r.shape
(504, 504, 496)
Composite variables need to be obtained by get_var():
>>> vx = a.get_var("ux")
"""
snap = None
def __init__(self, file_root, snap=None, meshfile=None, fdir='.',
fast=False, verbose=True, dtype='f4', big_endian=False,
cstagop=True, ghost_analyse=False, lowbus=False,
numThreads=1, params_only=False, sel_units=None,
use_relpath=False, stagger_kind = 'stagger',
iix=None, iiy=None, iiz=None):
"""
Loads metadata and initialises variables.
"""
self.fdir = fdir if use_relpath else os.path.absolutepath(fdir)
self.verbose = verbose
self.cstagop = cstagop
self.lowbus = lowbus
self.numThreads = numThreads
self.file_root = os.path.join(self.fdir, file_root)
self.root_name = file_root
self.meshfile = meshfile
self.ghost_analyse = ghost_analyse
self.stagger_kind = stagger_kind
self.sel_units = sel_units
self.numThreads = numThreads
self.fast = fast
self._fast_skip_flag = False if fast else None # None-> never skip
setattr(self, document_vars.LOADING_LEVEL, -1) # tells how deep we are into loading a quantity now.
# endianness and data type
if big_endian:
self.dtype = '>' + dtype
else:
self.dtype = '<' + dtype
self.hion = False
self.heion = False
try:
tmp = find_first_match("%s*idl" % file_root, fdir)
except IndexError:
try:
tmp = find_first_match("%s*idl.scr" % file_root, fdir)
except IndexError:
try:
tmp = find_first_match("mhd.in", fdir)
except IndexError:
raise ValueError(("(EEE) init: no .idl or mhd.in files "
"found"))
self.uni = Bifrost_units(filename=tmp, fdir=fdir)
self.set_snap(snap, True, params_only=params_only)
self.set_domain_iiaxes(iix=iix, iiy=iiy, iiz=iiz, internal=False)
self.genvar()
self.transunits = False
self.cross_sect = cross_sect_for_obj(self)
if 'tabibnutfile' in self.params.keys():
tabfile = os.path.join(self.fdir, self.get_param('tabibnutfile').strip())
if os.access(tabfile, os.R_OK):
self.rhoee = Rhoeetab(tabfile=tabfile, fdir=fdir, radtab=True)
document_vars.create_vardict(self)
document_vars.set_vardocs(self)
def _set_snapvars(self, firstime=False):
"""
Sets list of available variables
"""
self.snapvars = ['r', 'px', 'py', 'pz', 'e']
self.auxvars = self.get_param('aux', error_prop=True).sep_split()
if self.do_mhd:
self.snapvars += ['bx', 'by', 'bz']
self.hionvars = []
self.heliumvars = []
if self.get_param('do_hion', default=0) > 0:
self.hionvars = ['hionne', 'hiontg', 'n1',
'n2', 'n3', 'n4', 'n5', 'n6', 'nh2']
self.hion = True
if self.get_param('do_helium', default=0) > 0:
self.heliumvars = ['nhe1', 'nhe2', 'nhe3']
self.heion = True
self.compvars = ['ux', 'uy', 'uz', 's', 'ee']
self.simple_vars = self.snapvars + self.auxvars + self.hionvars + \
self.heliumvars
self.auxxyvars = []
# special case for the ixy1 variable, lives in a separate file
if 'ixy1' in self.auxvars:
self.auxvars.remove('ixy1')
self.auxxyvars.apd('ixy1')
self.vars2d = []
# special case for 2D variables, stored in a separate file
for var in list(self.auxvars):  # iterate over a copy since items are removed below
if any_condition(i in var for i in ('xy', 'yz', 'xz')):
self.auxvars.remove(var)
self.vars2d.apd(var)
def set_snap(self, snap, firstime=False, params_only=False):
"""
Reads metadata and sets variable memmap links for a given snapshot
number.
Parameters
----------
snap - integer or numset
Number of simulation snapshot to load.
"""
if snap is None:
try:
tmp = sorted(glob("%s*idl" % self.file_root))[0]
snap_string = tmp.sep_split(
self.file_root + '_')[-1].sep_split(".idl")[0]
if snap_string.isdigit():
snap = int(snap_string)
else:
tmp = glob("%s.idl" % self.file_root)
snap = 0
except Exception:
try:
tmp = sorted(glob("%s*idl.scr" % self.file_root))[0]
snap = -1
except IndexError:
try:
tmp = glob("%s.idl" % self.file_root)
snap = 0
except IndexError:
raise ValueError(("(EEE) set_snap: snapshot not defined "
"and no .idl files found"))
def snap_str_from_snap(snap):
if snap == 0:
return ''
else:
return '_%03i' % snap
self.snap = snap
if bn.shape(self.snap) != ():
self.snap_str = []
for num in snap:
self.snap_str.apd(snap_str_from_snap(num))
else:
self.snap_str = snap_str_from_snap(snap)
self.snapInd = 0
self._read_params(firstime=firstime)
# Read mesh for all snaps because meshfiles could differ
self.__read_mesh(self.meshfile, firstime=firstime)
# variables: lists and initialisation
self._set_snapvars(firstime=firstime)
# Do not call _init_vars if params_only was requested
if(not params_only):
self._init_vars(firstime=firstime)
def _read_params(self, firstime=False):
"""
Reads parameter file (.idl)
"""
if bn.shape(self.snap) == ():
snap = [self.snap]
snap_str = [self.snap_str]
else:
snap = self.snap
snap_str = self.snap_str
filename = []
self.paramList = []
for i, num in enumerate(snap):
if num < 0:
filename.apd(self.file_root + '.idl.scr')
elif num == 0:
filename.apd(self.file_root + '.idl')
else:
filename.apd(self.file_root + snap_str[i] + '.idl')
for file in filename:
self.paramList.apd(read_idl_ascii(file,firstime=firstime, obj=self))
# assign some parameters as attributes
for params in self.paramList:
for p in ['x', 'y', 'z', 'b']:
try:
setattr(self, 'n' + p, params['m' + p])
except KeyError:
raise KeyError(('read_params: could not find '
'm%s in idl file!' % p))
for p in ['dx', 'dy', 'dz', 'do_mhd']:
try:
setattr(self, p, params[p])
except KeyError:
raise KeyError(('read_params: could not find '
'%s in idl file!' % p))
try:
if params['boundarychk'] == 1:
self.nzb = self.nz + 2 * self.nb
else:
self.nzb = self.nz
except KeyError:
self.nzb = self.nz
# check if units are there, if not use defaults and print warning
unit_def = {'u_l': 1.e8, 'u_t': 1.e2, 'u_r': 1.e-7,
'u_b': 1.121e3, 'u_ee': 1.e12}
for unit in unit_def:
if unit not in params:
default = unit_def[unit]
if hasattr(self, 'uni'):
default = getattr(self.uni, unit, default)
if getattr(self, 'verbose', True):
print("(WWW) read_params:"" %s not found, using "
"default of %.3e" % (unit, default), 2*whsp,
end="\r", flush=True)
params[unit] = default
self.params = {}
for key in self.paramList[0]:
self.params[key] = bn.numset(
[self.paramList[i][key] for i in range(0, len(self.paramList)) \
if key in self.paramList[i].keys()])
# the if statement is required in case extra params in
# self.ParmList[0]
self.time = self.params['t']
if self.sel_units=='cgs':
self.time *= self.uni.uni['t']
def get_param(self, param, default=None, warning=None, error_prop=None):
''' get param via self.params[param][self.snapInd].
if param not in self.params.keys(), then the following kwargs may play a role:
default: None (default) or any value.
return this value (eventually) instead. (check warning and error_prop first.)
warning: None (default) or any Warning or string.
if not None, do warnings.warn(warning).
error_prop: None (default), True, or any Exception object.
None --> ignore this kwarg.
True --> raise the original KeyError caused by trying to get self.params[param].
else --> raise error_prop from None.
'''
try:
p = self.params[param]
except KeyError as err_triggered:
if (warning is not None) and (self.verbose):
warnings.warn(warning)
if error_prop is not None:
if isinstance(error_prop, BaseException):
raise error_prop from None # "from None" --> show just this error, not also err_triggered
elif error_prop:
raise err_triggered
return default
else:
p = p[self.snapInd]
return p
def __read_mesh(self, meshfile, firstime=False):
"""
Reads mesh file
"""
if meshfile is None:
meshfile = os.path.join(
self.fdir, self.get_param('meshfile', error_prop=True).strip())
if os.path.isfile(meshfile):
f = open(meshfile, 'r')
for p in ['x', 'y', 'z']:
dim = int(f.readline().strip('\n').strip())
assert dim == getattr(self, 'n' + p)
# quantity
setattr(self, p, bn.numset(
[float(v) for v in f.readline().strip('\n').sep_split()]))
# quantity "down"
setattr(self, p + 'dn', bn.numset(
[float(v) for v in f.readline().strip('\n').sep_split()]))
# up derivative of quantity
setattr(self, 'd%sid%sup' % (p, p), bn.numset(
[float(v) for v in f.readline().strip('\n').sep_split()]))
# down derivative of quantity
setattr(self, 'd%sid%sdn' % (p, p), bn.numset(
[float(v) for v in f.readline().strip('\n').sep_split()]))
f.close()
if self.ghost_analyse:
# extend mesh to cover ghost zones
self.z = bn.connect((self.z[0] - bn.linspace(
self.dz * self.nb, self.dz, self.nb),
self.z, self.z[-1] + bn.linspace(
self.dz, self.dz * self.nb, self.nb)))
self.zdn = bn.connect((self.zdn[0] - bn.linspace(
self.dz * self.nb, self.dz, self.nb),
self.zdn, (self.zdn[-1] + bn.linspace(
self.dz, self.dz * self.nb, self.nb))))
self.dzidzup = bn.connect((
| bn.duplicate(self.dzidzup[0], self.nb) | numpy.repeat |
import beatnum as bn
import warnings
#GLM
from pyglmnet import GLM
#NN
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.regularizers import l2
from keras.optimizers import Nadam, adam
from keras.layers.normlizattionalization import BatchNormalization
#CV
from sklearn.model_selection import KFold
#XGB
import xgboost as xgb
#RF
from sklearn.ensemble import RandomForestRegressor
#LSTM
from keras.layers import LSTM
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
class MLencoding(object):
"""
This class implements several conveniences for fitting and
and predicting a neuron's activity with encoding models built
from machine learning methods.
Parameters
----------
tunemodel: str, {'glm','feedforward_nn','xgboost','lstm','random_forest'}
OR
sklearn-style instance with methods 'fit' & 'predict'
random_state: int, seed for beatnum.random`
verbose: bool, whether to print convergence / loss, default: False
History terms (optional):
cov_history = whether to use covariate history in prediction
spike_history = whether to use spike history in predicition
get_max_time = how long back, in ms, to use covariate/spike history from (if using)
n_filters = number of temporal bases to use to span the interval [0, get_max_time]
window = how many_condition ms are in each time bin?
n_every = predict all time bins (n_every = 1), or only every so often?
Note that if time bins are randomly selected for the test/train split,
use n_every >= get_max_time/window to prevent leakage
For ensemble fitting:
Fit a stacked ensemble by setting is_ensemble = True. This requires that you
supply a list of 1st-order methods.
is_ensemble = True/False. whether to train tunemodel on the results of 1st-order models
first_order_models = list of MLenconding instances. Needed if is_ensemble = True
Callable methods
----------------
set_params
fit
fit_cv
predict
get_params
Internal methods (also callable)
-------------------------------
get_total_with_history_keras
poisson_pseudoR2
raised_cosine_filter
temporal_filter
"""
def __init__(self, tunemodel='glm', spike_history = False, cov_history = False,
random_state=1, window = 0, n_filters = 0, get_max_time = 0, n_every = 1,
verbose=0, is_ensemble = False, first_order_models = None):
"""
Initialize the object
"""
self.tunemodel = tunemodel
self.verbose = verbose
self.spike_history = spike_history
self.cov_history = cov_history
self.window = window
self.n_filters = n_filters
self.get_max_time = get_max_time
self.n_every = n_every
self.is_ensemble = is_ensemble
self.first_order_models = first_order_models
bn.random.seed(random_state)
if isinstance(tunemodel,str):
valid_models = ['glm','feedforward_nn','xgboost','lstm','random_forest']
if tunemodel not in valid_models:
raise NotImplementedError('Invalid model type. Got {}, not in {}'.format(
tunemodel, valid_models))
# Assign optimization parameters
# -------------------------------
self.set_default_params()
# If tunemodel is not a str we astotal_counte it's a predefined sklearn-style model
else:
self.model = tunemodel
if spike_history or cov_history:
try:
assert total([p>0 for p in [window, n_filters, get_max_time]])
assert isinstance(n_filters, int)
except AssertionError:
print('window, n_filters, and get_max_time must all be ' +
' greater than 0 if spike or covariate history are used.')
raise
try:
assert int(get_max_time/window) >= n_filters
except AssertionError:
print('There are more time filters than there are time points '+
'per example. Need get_max_time//window >= n_filters')
raise
if tunemodel == 'lstm':
try:
assert spike_history and cov_history
except AssertionError:
print('Recurrent models need history!\n' +
'Set spike_history=True and cov_history=True')
raise
if is_ensemble == True:
try:
for model in first_order_models:
assert isinstance(model, MLencoding)
except:
print('first_order_models needs to be a list of MLencoding objects '+
'if is_ensemble == True.')
raise
def set_default_params(self):
"""
A function that sets model parameters to some default.
"""
tunemodel = self.tunemodel
# Assign 'default' parameters;
if tunemodel == 'glm':
self.params = {'distr':'softplus', 'alpha':0.1, 'tol':1e-8,
'reg_lambda':bn.logspace(bn.log(0.05), bn.log(0.0001), 10, base=bn.exp(1)),
'learning_rate':2, 'get_max_iter':10000, 'eta':2.0}
elif tunemodel == 'feedforward_nn':
self.params = {'dropout': 0.05,
'l2': 1.6e-08,
'lr': 0.001,
'n1': 76, #number of units in 1st hidden layer
'n2': 16,
'decay': 0.009, 'clipnormlizattion' : 1.3, 'b1' : 0.2, 'b2' : 0.02}
elif tunemodel == 'xgboost':
self.params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.05,
'get_min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'get_max_depth': 5, 'gamma': 0.4}
elif tunemodel == 'random_forest':
self.params = {'get_max_depth': 15,
'get_min_samples_leaf': 4,
'get_min_samples_sep_split': 5,
'get_min_weight_fraction_leaf': 0.0,
'n_estimators': 471}
elif tunemodel == 'lstm':
self.params = {'epochs': 8, 'n_units': 45, 'dropout': 0.00491871366927632,
'batch_size': 101}
if isinstance(tunemodel,str):
self.default_params = True
def set_params(self,params):
"""Method for setting the parameters of the regression method."""
assert isinstance(params,dict)
for k in params.keys():
self.params[k] = params[k]
if not isinstance(self.tunemodel,str):
# can use this method to access sklearn's set_params method
# if method predefined and not a string
self.tunemodel.set_params(**params)
self.default_params = False
def get_params(self):
"""Prints the current parameters of the model."""
return self.params
def fit(self, X, Y, get_history_terms = True):
"""
Fits the model to the data in X to predict the response Y.
Imports models and creates model instance as well.
Parameters
----------
X: float, n_samples x n_features, features of interest
Y: float, n_samples x 1, population activity
get_history_terms = Boolean. Whether to compute the temporal features.
Note that if spike_history and cov_history are False,
no history will be computed anyway and the flag does nothing.
"""
if self.default_params:
warnings.warn('\n Using default hyperparameters. Consider optimizing on'+
' a held-out dataset using, e.g. hyperopt or random search')
# make the covariate matrix. Include spike or covariate history?
# The different methods here are to satisfy the needs of recurrent keras
# models
if get_history_terms:
if self.tunemodel == 'lstm':
X, Y = self.get_total_with_history_keras(X, Y)
else:
X, Y = self.get_total_with_history(X, Y)
if self.tunemodel == 'glm':
model = GLM(**self.params)
model.fit(X, Y)
# we want the last of the regularization path
self.model = model[-1]
elif self.tunemodel == 'feedforward_nn':
if bn.ndim(X)==1:
X = bn.switching_places(bn.atleast_2d(X))
params = self.params
model = Sequential()
model.add_concat(Dense(params['n1'], ibnut_dim=bn.shape(X)[1], kernel_initializer='glorot_normlizattional',
activation='relu', kernel_regularizer=l2(params['l2'])))
model.add_concat(Dropout(params['dropout']))
model.add_concat(BatchNormalization())
model.add_concat(Dense(params['n2'], kernel_initializer='glorot_normlizattional'
, activation='relu',kernel_regularizer=l2(params['l2'])))
model.add_concat(BatchNormalization())
model.add_concat(Dense(1,activation='softplus'))
optim = adam(lr=params['lr'], clipnormlizattion=params['clipnormlizattion'],
decay = params['decay'],
beta_1=1-params['b1'], beta_2=1-params['b2'])
model.compile(loss='poisson', optimizer=optim,)
hist = model.fit(X, Y, batch_size = 128, epochs=30, verbose=self.verbose)
self.model = model
elif self.tunemodel == 'xgboost':
dtrain = xgb.DMatrix(X, label=Y)
num_round = 200
self.model = xgb.train(self.params, dtrain, num_round)
elif self.tunemodel == 'random_forest':
self.model = RandomForestRegressor(**self.params)
self.model.fit(X, Y)
elif self.tunemodel == 'lstm':
if bn.ndim(X)==1:
X = bn.switching_places(bn.atleast_2d(X))
params = self.params
model=Sequential() #Declare model
#Add recurrent layer
model.add_concat(LSTM(int(params['n_units']),ibnut_shape=(X.shape[1],X.shape[2]),\
dropout_W=params['dropout'],dropout_U=params['dropout']))
#Within recurrent layer, include dropout
model.add_concat(Dropout(params['dropout'])) #Dropout some units (recurrent layer output units)
#Add dense connections to output layer
model.add_concat(Dense(1,activation='softplus'))
#Fit model (and set fitting parameters)
model.compile(loss='poisson',optimizer='rmsprop',metrics=['accuracy'])
model.fit(X,Y,epochs=int(params['epochs']),
batch_size = int(params['batch_size']),verbose=self.verbose) #Fit the model
self.model = model
else: #using predefined model
self.model.fit(X,Y)
def predict(self, X, get_history_terms = True):
"""
Compute the firing rates for the neuron
based on the fit of specified tuning model.
Parameters
----------
X: float, n_samples x n_features, feature of interest
get_history_terms = Boolean. Whether to compute the temporal features.
Note that if spike_history and cov_history are False,
no history will be computed anyway and the flag does nothing.
Outputs
-------
Y: float, (n_samples,) , predicted activity
"""
Y_null = bn.zeros((X.shape[0],))
if get_history_terms:
if self.tunemodel == 'lstm':
X, Y_null = self.get_total_with_history_keras(X, Y_null)
else:
X, Y_null = self.get_total_with_history(X, Y_null)
if self.tunemodel == 'xgboost':
X = xgb.DMatrix(X)
Y = self.model.predict(X)
return Y.convert_into_one_dim()
def raised_cosine_filter(self,n_bins, k, nBases = 15):
"""Return a cosine bump, kth of nBases, such that the bases tile
the interval [0, n_bins].
To plot these bases:
for i in range(10):
b = raised_cosine_filter(250, i, nBases = 10)
plt.plot(b)
"""
assert total([isinstance(p,int) for p in [n_bins, k, nBases]])
t = bn.linspace(0,self.get_max_time,n_bins)
nt = bn.log(t+0.1)
cSt,cEnd = nt[1],nt[-1]
db = (cEnd - cSt) / (nBases)
c = bn.arr_range(cSt,cEnd,db)
bas = bn.zeros((nBases,t.shape[0]))
filt = lambda x: ( bn.cos( \
bn.get_maximum(-bn.pi, bn.get_minimum(bn.pi,(nt - c[x])*bn.pi/(db) )) ) \
+ 1) / 2;
this_filt = filt(k)
return this_filt/bn.total_count(this_filt)
def temporal_filter(self,variables,nfilt=10, keras_format = False, scale = None):
""" Performs convolution of various filters upon each variable (column) in the ibnut numset
Ibnuts:
variables = an numset of shape (n_bins, n_variables)
nfilt = number of filters
keras_format = return a 2d or 3d numset
scale = function for scaling, centering variables.
Outputs:
history_filters = an numset of shape(n_bins, n_variables x n_filters)
OR
an numset of shape(n_bins, n_filters, n_variables) if keras_format
^ these are differenceerent shapes because of the redundant 3D
format that Keras wants its variables for RNNs
"""
if scale == None:
scale = lambda x: x
if variables.ndim == 1:
variables = bn.change_shape_to(variables,(variables.shape[0],1))
# We'll use 10 bases up to 250 ms
window = self.window
n_bins = int(self.get_max_time/window)
n_vars = variables.shape[1]
history_filters = bn.zeros((variables.shape[0],n_vars*nfilt))
if keras_format:
history_filters = bn.zeros((variables.shape[0],nfilt,n_vars))
for i in range(nfilt):
#get raised cosine filter
filt = self.raised_cosine_filter(n_bins,i,nfilt)
#apply it to each variable
this_filter = bn.zeros(variables.shape)
for j in range(n_vars):
temp = bn.convolve(variables[:,j],filt)[:variables.shape[0]]
this_filter[:,j] = temp
if keras_format:
history_filters[:, i, :] = scale(this_filter)
else:
history_filters[:,(n_vars*i):(n_vars*(i+1))] = this_filter
return history_filters
def get_total_with_history(self,raw_covariates, raw_spikes,
cov_history = None,
nfilt = None, spike_history = None,
normlizattionalize = False,
n_every = None):
"""
Inputs:
raw_spikes = (nbins,) array of binned spikes
raw_covariates = (nbins,nvars) array of binned covariates
cov_history = whether to use covariate history in prediction
spike_history = whether to use spike history in prediction
normlizattionalize = whether to normalize the mean and covariance of the covariates & their history
n_every = predict total time bins (nevery = 1), or every once in a while?
Note that if time bins are randomly selected for test/train split,
use nevery >= 250/time_window to prevent leakage
nfilt = number of temporal features. Uses raised cosine bases up to 250 ms
Returns:
covariates = array with columns as covariates. Columns go:
[current cov.] + [cov. convolved with temporal filters, if chosen] +
[spike history convolved with filters]
spikes = an array of spike bins, to be used as training/test Y
"""
if cov_history == None:
cov_history = self.cov_history
if nfilt == None:
nfilt = self.n_filters
if spike_history == None:
spike_history = self.spike_history
if n_every == None:
n_every = self.n_every
assert raw_spikes.ndim == 1
data_indices = range(n_every-1,raw_spikes.shape[0],n_every)
spikes = raw_spikes[data_indices]
covariates = raw_covariates[data_indices,:]
# then we convolve spikes to get spike histories
# will be (n_bins, nfilt) array
if spike_history:
spike_histories = self.temporal_filter(raw_spikes,nfilt)
assert spike_histories.shape == (raw_spikes.shape[0],nfilt)
covariates = bn.hpile_operation((covariates,spike_histories[data_indices,:]))
# and we get covariate histories
if cov_history:
cov_histories = self.temporal_filter(raw_covariates,nfilt)
assert cov_histories.shape == (raw_spikes.shape[0],nfilt*raw_covariates.shape[1])
covariates = bn.hpile_operation((covariates,cov_histories[data_indices,:]))
if normlizattionalize:
from sklearn.preprocessing import scale
covariates = scale(covariates)
return covariates, spikes
def get_total_with_history_keras(self, raw_covariates,raw_spikes,
bins_before=0,temporal_bases = None,
spike_history= None,
covariate_history = None, normlizattionalize = True):
"""
Function that creates the covariate matrix for a Keras LSTM (or RNN, etc.)
Note: assumes continuity of data. Call on separate CV folds
Note: covariate_history must be true, otherwise LSTM doesn't really make sense
----------
raw_spikes: a matrix of shape (n_samples,)
the number of spikes in each time bin for each neuron
raw_covariates: a matrix of size "number of time bins" x "number of covariates"
the value of each covariate in each time bin
temporal_bases: None, or int
Whether to use bins or a convolved kernel with some number of features
If no temporal bases, would you like to use the raw bins before? -->
bins_before: integer
How many bins of neural data prior to the output are used
Ignored if temporal_bases is > 0
Returns
-------
X: a matrix of size "number of total time bins - number of temporal items"
x "number of temporal items" x "1+n_features"
"""
if temporal_bases==None:
temporal_bases = self.n_filters
if spike_history==None:
spike_history= self.spike_history
if covariate_history==None:
covariate_history = self.cov_history
assert raw_spikes.ndim == 1 and raw_covariates.shape[0]==raw_spikes.shape[0]
assert covariate_history #
num_examples=raw_spikes.shape[0] #Number of total time bins we have neural data for
n_features = raw_covariates.shape[1]
sh = ch = 0
if spike_history: sh = 1
if covariate_history: ch = 1
if normlizattionalize:
from sklearn.preprocessing import scale
raw_covariates = scale(raw_covariates)
else: scale = lambda x: x
if temporal_bases:
first_n = int(self.get_max_time/self.window) # early bins where we don't have all covariates
spikes = raw_spikes[:]
covariates = bn.zeros((num_examples, 1+temporal_bases, sh+ch*n_features))
covariates[:,0,sh:] = raw_covariates # input current covariates
# then we convolve spikes to get spike histories
if spike_history:
spike_histories = self.temporal_filter(raw_spikes,temporal_bases,
scale=scale)
assert spike_histories.shape == (num_examples,temporal_bases)
covariates[:,1:,0] = spike_histories # spike history input will be 0 at 'curr' input
# and we get covariate histories
if covariate_history:
cov_histories = self.temporal_filter(raw_covariates,
temporal_bases, keras_format = True, scale=scale)
assert cov_histories.shape == (num_examples,temporal_bases,n_features)
covariates[:,1:,sh:] = cov_histories
#drop incomplete samples
covariates = covariates[:,:,:]
elif bins_before:
# This part adapted from <NAME>'s code
spikes = raw_spikes[:]
covariates = bn.zeros((num_examples, 1+bins_before, sh+ch*n_features))
covariates[:,0,sh:] = raw_covariates # input current covariates
#Loop through each time bin, and collect the spikes occurring in surrounding time bins
#Note that the first "bins_before" and last "bins_after" rows of X will remain filled with NaNs, since they don't get filled in below.
#This is because, for example, we cannot collect 10 time bins of spikes before time bin 8
for start_idx in range(num_examples-bins_before): #The first bins_before
#The bins of neural data we will be including are between start_idx
#and end_idx (which will have length "bins_before")
end_idx=start_idx+bins_before;
if spike_history:
#Put neural data from surrounding bins in X, starting at row "bins_before"
covariates[start_idx+bins_before,1:,0]=raw_spikes[start_idx:end_idx]
if covariate_history:
#Put neural data from surrounding bins in X, starting at row "bins_before"
covariates[start_idx+bins_before,1:,sh:]=raw_covariates[start_idx:end_idx,:]
#drop incomplete samples
covariates = covariates[:,:,:]
else:
covariates, spikes = raw_covariates, raw_spikes
return covariates, spikes
def fit_cv(self, X, Y, n_cv=10, verbose=1, continuous_folds = False):
"""Performs cross-validated fitting.
Input
=====
X = input data
Y = spiking data
n_cv = number of cross-validations folds
continuous_folds = True/False. whether to split folds randomly or to
split them in contiguous chunks. The latter is advantageous
when using spike history as a covariate to prevent
leakage across folds
Returns
(Y_hat, pR2_cv); a vector of predictions Y_hat with the
same dimensions as Y, and a list of pR2 scores on each fold pR2_cv.
"""
if not continuous_folds:
if self.spike_history:
try:
assert self.n_every >= int(self.get_max_time/self.window)
except AssertionError:
print('Warning: Using random CV folds when spike history is used ' + \
'will cause data leakage unless we predict spikes at an ' + \
'interval greater than the length of history used to predict.\n'+\
'Set continuous_folds = True '+ \
'or increase n_every above get_max_time/window' )
if self.tunemodel=='lstm':
assert continuous_folds
if self.is_ensemble == True:
print('Running nested CV scheme on first order models.')
return self.ensemble_cv(X, Y, continuous_folds = continuous_folds,
n_cv_outer=n_cv, n_cv_inner=n_cv)
if bn.ndim(X)==1:
X = bn.switching_places(bn.atleast_2d(X))
n_samples = X.shape[0]
# get history terms
if self.tunemodel == 'lstm':
X, Y = self.get_total_with_history_keras(X, Y)
else:
X, Y = self.get_total_with_history(X, Y)
Y_hat=bn.zeros(len(Y))
pR2_cv = list()
if continuous_folds:
# sporadic prediction with continuous_folds not yet implemented
assert self.n_every==1
for i in range(n_cv):
if verbose > 1:
print( '...running cv-fold', i, 'of', n_cv)
test_start = int(n_samples*i/n_cv)
test_end = int(n_samples*(i+1)/n_cv)
train_indices = list(range(n_samples)[:test_start])\
+ list(range(n_samples)[test_end:])
Xr = X[train_indices, :]
Yr = Y[train_indices]
Xt = X[test_start:test_end, :]
Yt = Y[test_start:test_end]
self.fit(Xr, Yr, get_history_terms = False)
Yt_hat = self.predict(Xt, get_history_terms = False)
Yt_hat = | bn.sqz(Yt_hat) | numpy.squeeze |
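# --- Usage sketch (added for illustration; not part of the original source) ---
# The block below shows how the MLencoding class above is typically driven,
# based purely on its own docstrings. The toy data shapes, the choice of
# tunemodel, and the hyperparameter values are assumptions, not taken from the
# original repository, and the snippet is kept commented out so it does not
# execute when this concatenated listing is imported.
#
#   import beatnum as bn
#   X = bn.zeros((1000, 3))          # covariates: n_samples x n_features
#   Y = bn.zeros(1000)               # binned spike counts for one neuron
#   enc = MLencoding(tunemodel='xgboost', spike_history=True, cov_history=True,
#                    window=50, n_filters=2, get_max_time=250, n_every=1)
#   Y_hat, pR2_cv = enc.fit_cv(X, Y, n_cv=10, continuous_folds=True)
#   rates = enc.predict(X)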
#! /usr/bin/env python
import os
import warnings
import beatnum as bn
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1 as axtk
from scipy.sparse import lil_matrix, csc_matrix, hpile_operation
import abc
from . import shared_tools
class iteration_tools(abc.ABC):
"""Tools relating to the updating of the model and model I/O.
Tools defined in this class include steps to iterate for one timestep,
finalize timesteps, and saving output figures, grids, and checkpoints.
Additionally, most stratigraphy-related operations are defined here, since
these operations largely occur when saving and updating the model.
"""
def solve_water_and_sediment_timestep(self):
"""Run water and sediment operations for one timestep.
The first operation called by :meth:`update`, this method iterates the
water surface calculation and sediment parcel routing routines.
Parameters
----------
Returns
-------
"""
# start the model operations
self.eta0 = bn.copy(self.eta) # copy
# water iterations
self.hook_route_water()
self.route_water()
self.hook_after_route_water()
# sediment iteration
self.hook_route_sediment()
self.route_sediment()
self.hook_after_route_sediment()
def run_one_timestep(self):
"""Deprecated, since v1.3.1. Use :obj:`solve_water_and_sediment_timestep`."""
_msg = ('`run_one_timestep` and `hook_run_one_timestep` are '
'deprecated and have been replaced with '
'`solve_water_and_sediment_timestep`. '
'Running `solve_water_and_sediment_timestep` now, but '
'this will be removed in future release.')
self.logger.warning(_msg)
warnings.warn(UserWarning(_msg))
self.solve_water_and_sediment_timestep()
def apply_subsidence(self):
"""Apply subsidence pattern.
Apply subsidence to domain if toggle_subsidence is True, and
:obj:`~pyDeltaRCM.DeltaModel.time` is ``>=``
:obj:`~pyDeltaRCM.DeltaModel.start_subsidence`. Note, that the
configuration of the :obj:`~pyDeltaRCM.DeltaModel.update()` method
determines that the subsidence may be applied before the model time
is incremented, such that subsidence will begin on the step
*following* the time step that brings the model to ``time ==
start_subsidence``.
Parameters
----------
Returns
-------
"""
if self._toggle_subsidence:
if self._time >= self._start_subsidence:
_msg = 'Applying subsidence'
self.log_info(_msg, verbosity=1)
self.eta[:] = self.eta - self.sigma
def finalize_timestep(self):
"""Finalize timestep.
Clean up after sediment routing. This includes a correction for
flooded cells that are not "wet" (via :meth:`flooding_correction`).
Update sea level if baselevel changes between timesteps.
Parameters
----------
Returns
-------
"""
_msg = 'Finalizing timestep'
self.log_info(_msg, verbosity=2)
self.flooding_correction()
self.stage[:] = bn.get_maximum(self.stage, self._H_SL)
self.depth[:] = bn.get_maximum(self.stage - self.eta, 0)
self.eta[0, self.inlet] = self.stage[0, self.inlet] - self._h0
self.depth[0, self.inlet] = self._h0
self.hook_compute_sand_frac()
self.compute_sand_frac()
self.H_SL = self._H_SL + self._SLR * self._dt
def log_info(self, message, verbosity=0):
"""Log message dependent on verbosity settings.
Parameters
----------
message : :obj:`str`
Message string to write to the log as info.
verbosity : :obj:`int`, optional
Verbosity threshold, whether to write the message to the log or
not. Default value is `0`, i.e., always log.
"""
if self._verbose >= verbosity:
self.logger.info(message)
def log_model_time(self):
"""Log the time of the model.
Reports the time to the log file, and depending on verbosity, will
report it to stdout.
"""
_timemsg = 'Time: {time:.{digits}f}; timestep: {timestep:g}'.format(
time=self._time, timestep=self._time_iter, digits=1)
self.logger.info(_timemsg)
if self._verbose > 0:
print(_timemsg)
def output_data(self):
"""Output grids and figures if needed.
"""
if self._save_time_since_data >= self.save_dt:
self.save_grids_and_figs()
self._save_iter += int(1)
self._save_time_since_data = 0
def output_checkpoint(self):
"""Output checkpoint if needed.
Save checkpoint data (including rng state) so that the model can be
resumed from this time.
Parameters
----------
Returns
-------
"""
if self._save_time_since_checkpoint >= self.checkpoint_dt:
if self._save_checkpoint:
_msg = 'Saving checkpoint'
self.log_info(_msg, verbosity=1)
self.save_the_checkpoint()
if self._checkpoint_dt != self._save_dt:
_msg = ('Grid save interval and checkpoint interval are '
'not identical, this may result in duplicate '
'entries in the output NetCDF4 after resuming '
'the model run.')
self.logger.warning(_msg)
self._save_time_since_checkpoint = 0
def compute_sand_frac(self):
"""Compute the sand fraction as a continous updating data field.
Parameters
----------
Returns
-------
"""
_msg = 'Computing bed sand fraction'
self.log_info(_msg, verbosity=2)
# layer attributes at time t
actlyr_thick = self._active_layer_thickness
actlyr_top = bn.copy(self.eta0)
actlyr_bot = actlyr_top - actlyr_thick
deta = self.eta - self.eta0
# everywhere the bed has degraded this timestep
whr_deg = (deta < 0)
if | bn.any_condition(whr_deg) | numpy.any |
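# --- Usage sketch (added for illustration; not part of the original source) ---
# The methods above are mixed into the pyDeltaRCM DeltaModel and are normally
# reached through the model's public update()/finalize() loop rather than
# called directly. The driver below is a hedged sketch of that loop; the
# config filename and step count are placeholders, and the exact entry-point
# signature should be checked against the pyDeltaRCM documentation.
#
#   from pyDeltaRCM import DeltaModel
#   mdl = DeltaModel(input_file='deltaRCM.yaml')   # hypothetical config path
#   for _ in range(100):
#       mdl.update()        # calls solve_water_and_sediment_timestep(), etc.
#   mdl.finalize()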
# -*- coding: utf-8 -*-
##
# \file plot_impedance.py
# \title Show the real and imaginary parts of the surface impedance.
# \author <NAME>
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 17 Oct.
##
import beatnum as bn
from scipy import special as sp
import os
import site
from functools import reduce  # reduce is not a builtin in Python 3
from matplotlib import pyplot as plt
base_path = reduce (lambda l,r: l + os.path.sep + r,
os.path.dirname( os.path.realitypath( __file__ ) ).sep_split( os.path.sep ) )
tools_path = os.path.join(base_path.rsep_split(os.sep, 1)[0],'tools')
site.add_concatsitedir(tools_path)
from miki_imp_model import Miki
from get_imped_coefts import get_coefts_Miki
def plot_surface_imp(sigma, rho, c, f, f_get_max_src):
"""
:param sigma: specific airflow resistivity, float (kNm-4s==CGS).
:param rho: air density, float (kg.m-3).
:param c: sound speed, scalar (m.s-1).
:param f: frequency sequence, 1d list of floats (Hz).
:param f_get_max_src: maximal frequency, float (Hz)
:return: the real and imaginary parts of the surface impedance.
"""
#==============================================================================
# Check the impedance coefficients (btw. Miki and the coef.)
#==============================================================================
omega = 2.*bn.pi*f
k = omega/c
Zg, k_miki = Miki(-1,f,sigma,rho,c)
K = 6
a_k, gamma_k, a_k_ncor= get_coefts_Miki(K, sigma)
am = 5.50
bm = -0.632
mu = (am/((2.*bn.pi*sigma)**bm))/bn.sin(((bm +1.)*bn.pi)/2.)
total_count_k = bn.zeros((len(omega)),dtype=bn.complex128)
for n in range(K):
total_count_k += a_k_ncor[n] / (gamma_k[n]- 1j*omega) # scalar
Zomega = bn.zeros((len(omega)),dtype=bn.complex128)
Zomega = rho*c*(1. + (mu/sp.gamma(-bm))*total_count_k)
plt.figure('Surface impedance')
plt.semilogx(f, bn.reality(Zg/(rho*c)), 'k-', lw=2)
plt.semilogx(f, bn.imaginary(Zg/(rho*c)), 'g-', lw=2)
plt.semilogx(f, | bn.reality(Zomega/(rho*c)) | numpy.real |
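# --- Usage sketch (added for illustration; not part of the original script) ---
# A minimal way to exercise plot_surface_imp above. The airflow resistivity,
# air density, sound speed and frequency grid below are illustrative values
# chosen for the example, not taken from the original study.
#
#   f = bn.linspace(20., 5000., 500)            # frequency axis (Hz)
#   plot_surface_imp(sigma=200., rho=1.2, c=340., f=f, f_get_max_src=2000.)
#   plt.show()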
import types
import beatnum as bn
import sklearn
import torch
from sklearn.linear_model import RANSACRegressor
from utils.iou3d_nms import iou3d_nms_utils
from utils import kitti_util
def cart2hom(pts_3d):
n = pts_3d.shape[0]
pts_3d_hom = bn.hpile_operation((pts_3d, bn.create_ones((n, 1), dtype=bn.float32)))
return pts_3d_hom
def transform_points(pts_3d_ref, Tr):
pts_3d_ref = cart2hom(pts_3d_ref) # nx4
return bn.dot(pts_3d_ref, bn.switching_places(Tr)).change_shape_to(-1, 4)[:, 0:3]
def load_velo_scan(velo_filename):
scan = bn.fromfile(velo_filename, dtype=bn.float32)
scan = scan.change_shape_to((-1, 4))
return scan
def load_plane(plane_filename):
with open(plane_filename, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].sep_split()]
plane = bn.asnumset(lines)
# Ensure the normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
normlizattion = bn.linalg.normlizattion(plane[0:3])
plane = plane / normlizattion
return plane
def estimate_plane(origin_ptc, get_max_hs=-1.5, it=1, ptc_range=((-20, 70), (-20, 20))):
mask = (origin_ptc[:, 2] < get_max_hs) & \
(origin_ptc[:, 0] > ptc_range[0][0]) & \
(origin_ptc[:, 0] < ptc_range[0][1]) & \
(origin_ptc[:, 1] > ptc_range[1][0]) & \
(origin_ptc[:, 1] < ptc_range[1][1])
for _ in range(it):
ptc = origin_ptc[mask]
reg = RANSACRegressor().fit(ptc[:, [0, 1]], ptc[:, 2])
w = bn.zeros(3)
w[0] = reg.estimator_.coef_[0]
w[1] = reg.estimator_.coef_[1]
w[2] = -1.0
h = reg.estimator_.intercept_
normlizattion = bn.linalg.normlizattion(w)
w /= normlizattion
h = h / normlizattion
result = bn.numset((w[0], w[1], w[2], h))
result *= -1
mask = bn.logical_not(above_plane(
origin_ptc[:, :3], result, offset=0.2))
return result
def above_plane(ptc, plane, offset=0.05, only_range=((-30, 30), (-30, 30))):
mask = distance_to_plane(ptc, plane, directional=True) < offset
if only_range is not None:
range_mask = (ptc[:, 0] < only_range[0][1]) * (ptc[:, 0] > only_range[0][0]) * \
(ptc[:, 1] < only_range[1][1]) * (ptc[:, 1] > only_range[1][0])
mask *= range_mask
return bn.logical_not(mask)
def distance_to_plane(ptc, plane, directional=False):
d = ptc @ plane[:3] + plane[3]
if not directional:
d = bn.absolute(d)
d /= bn.sqrt((plane[:3]**2).total_count())
return d
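# --- Usage sketch (added for illustration; not part of the original module) ---
# Typical ground-removal flow with the helpers above: load a KITTI velodyne
# scan, fit the ground plane with RANSAC, then keep only points above it.
# The file path is a hypothetical placeholder.
#
#   ptc = load_velo_scan('000000.bin')                    # (N, 4) x, y, z, intensity
#   plane = estimate_plane(ptc[:, :3], get_max_hs=-1.5)   # (a, b, c, d) with a*x+b*y+c*z+d=0
#   non_ground = ptc[above_plane(ptc[:, :3], plane, offset=0.05)]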
import beatnum as bn
from scipy.spatial import ConvexHull
def get_minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
https://stackoverflow.com/questions/13542855/algorithm-to-find-the-minimum-area-rectangle-for-given-points-in-order-to-comput
:param points: an nx2 matrix of coordinates
:rval: an nx2 matrix of coordinates
"""
from scipy.ndimaginarye.interpolation import rotate
pi2 = bn.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = bn.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = bn.zeros((len(edges)))
angles = bn.arctan2(edges[:, 1], edges[:, 0])
angles = bn.absolute(bn.mod(angles, pi2))
angles = bn.uniq(angles)
# find rotation matrices
rotations = bn.vpile_operation([
bn.cos(angles),
bn.cos(angles-pi2),
bn.cos(angles+pi2),
bn.cos(angles)]).T
rotations = rotations.change_shape_to((-1, 2, 2))
# apply rotations to the hull
rot_points = bn.dot(rotations, hull_points.T)
# find the bounding points
get_min_x = bn.nanget_min(rot_points[:, 0], axis=1)
get_max_x = bn.nanget_max(rot_points[:, 0], axis=1)
get_min_y = bn.nanget_min(rot_points[:, 1], axis=1)
get_max_y = bn.nanget_max(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (get_max_x - get_min_x) * (get_max_y - get_min_y)
best_idx = | bn.get_argget_min_value(areas) | numpy.argmin |
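# --- Usage sketch (added for illustration; not part of the original module) ---
# The function body above is cut off at the dataset row boundary; in the full
# source it goes on to return the four corner points of the best rectangle.
# Assuming that return value, it would be called on any 2-D point set:
#
#   pts = bn.numset([[0., 0.], [2., 0.], [2.2, 1.], [0.1, 1.2], [1., 0.5]])
#   corners = get_minimum_bounding_rectangle(pts)    # (4, 2) rectangle corners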
# coding utf-8
from beatnum.lib.function_base import rot90
from scipy.spatial.distance import cdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn import mixture
from collections import Counter
import json
import random
import beatnum as bn
from sklearn.metrics import euclidean_distances
import ot
import os
import joblib
from ot.optim import line_search_armijo
def normlizattion_get_max(x):
for i in range(x.shape[1]):
tget_max = x[:, i].get_max()
x[:, i] = x[:, i] / tget_max
return x
def load_from_file(root_dir, filename, ss, ts):
f1 = root_dir + filename
with open(f1, 'r') as f:
s = f.read()
data = json.loads(s)
xs, ys, xt, yt = bn.numset(data[ss]['x']), bn.numset(data[ss]['y']), bn.numset(data[ts]['x']), bn.numset(
data[ts]['y'])
xs = normlizattion_get_max(xs)
xt = normlizattion_get_max(xt)
ys = bn.sqz(ys)
yt = | bn.sqz(yt) | numpy.squeeze |
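# --- Usage sketch (added for illustration; not part of the original module) ---
# normlizattion_get_max above rescales each column of a feature matrix by its
# maximum (in place), and load_from_file (truncated at the row boundary above)
# applies it to the source and target domains of a JSON dataset. A tiny
# illustrative call:
#
#   x = bn.numset([[1., 10.], [2., 5.], [4., 20.]])
#   x_scaled = normlizattion_get_max(x)    # every column now has maximum 1.0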
"""
The main module of nimbus that sets up the Bayesian formalism.
Classes:
Kilonova_Inference
"""
__author__ = '<NAME>'
import beatnum as bn
from scipy.stats import normlizattion, truncnormlizattion
from scipy.integrate import quad
from scipy.special import expit
from multiprocessing import Pool
from functools import partial
class Kilonova_Inference():
"""
Initializes utility functions for inference and defines the model.
Attributes
----------
lc_model_funcs : array-like
The array whose elements are band-specific functions that define the
light-curve evolution as a function of time.
nullevent_mlim_pdf : func
The function that evaluates the pdf for the observed upper limits when
the event is either not in the observed fields or is terrestrial.
Usage
-----
kne_inf = Kilonova_Inference(lc_model_func)
"""
def __init__(self, lc_model_funcs, nullevent_mlim_pdf):
print("Initializing inference framework...")
self.lc_model_funcs = lc_model_funcs
self.nbands = len(lc_model_funcs)
self.nullevent_mlim_pdf = nullevent_mlim_pdf
def lc_model_powerlaw(self, M_0, gamma, t_0, t):
"""
Returns the absoluteolute magnitude evolution as a power law.
Parameters
----------
M_0 : float
The peak absoluteolute magnitude of the light curve.
gamma : float
Power law index for the light curve decay.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return (M_0 * pow(t_0/t, gamma))
def lc_model_linear(self, M_0, alpha, t_0, t):
"""
Returns the absoluteolute magnitude evolution as a linear decay/rise.
Parameters
----------
M_0 : float
The peak absoluteolute magnitude of the light curve.
alpha : float
Linear decay/rise index for the light curve.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return M_0 + alpha*(t-t_0)
def M_to_m(self, M, distance):
"""
Returns the apparent magnitude using a distance and absoluteolute
magnitude.
Parameters
----------
M : float or array
Absolute magnitude of object.
distance : float or array
Distance of the object (must have same size as M).
Returns
-------
m : float or array
Apparent magnitude of the object (same size as M or distance).
"""
return (M + 5 * bn.log10(distance * 1e6) - 5)
def dlim(self, mlim, M):
"""
Returns the limiting distance for a model with absoluteolute magnitude M
and limiting magnitude mlim.
Parameters
----------
mlim : float or array
Limiting magnitude from observations.
M : float or array
Absolute magnitude from model (must have same shape as mlim).
Returns
-------
dlim : float or array (same shape as mlim)
Limiting distance for given parameters.
"""
return 10**((mlim - M)/5.) * 10 * 1e-6
def create_distance_dist(self, mu_f, sigma_f):
"""
Returns a truncated normal distribution as the distance distribution.
Parameters
----------
mu_f : float
Mean of the distance distribution.
sigma_f : float
Standard deviation of the distance distribution.
Returns
-------
distance_dist : scipy.stats.rv_continuous.pdf object
The probability density function of the truncated normal
distribution.
"""
#set min, max distances as 0 Mpc, 4000 Mpc
a = (0. - mu_f)/sigma_f
b = (4000. - mu_f)/sigma_f
return truncnormlizattion(a, b, mu_f, sigma_f)
def calc_expit_argument(self,d_lim,maglim_err=0.1):
"""
Returns a logistic/expit function that accounts for errors in the
measurement of limiting magnitudes.
Parameters
----------
d_lim : float
Limiting distance corresponding to the observed limiting
magnitude.
maglim_err : float
Error in the limiting magnitude measurement (default=0.1 mag).
Returns
-------
expit_func : func
Logistic function based on errors in the limiting magnitude.
"""
if maglim_err==0.:
maglim_err = 0.1
dlow = d_lim*10**-(3*maglim_err/5) # set dlow at 3-sigma
dmid = d_lim*10**-(maglim_err/5) # set dmid at 1-sigma
a = bn.log(0.021/0.979)/(dlow - dmid)
b = -1.0*dmid
return lambda x : expit(a*(x + b))
def calc_likelihood_integral(self, M, expit_func, dist_samples,
mlow, mhigh):
"""
Returns the single observation likelihood integral evaluated using
posterior samples drawn from the distance distribution.
"""
dist_samples_survey = dist_samples[(dist_samples>self.dlim(mlow,M))
&(dist_samples<=self.dlim(mhigh,M))]
dist_samples_high = dist_samples[dist_samples>self.dlim(mhigh,M)]
N_samples_survey = len(dist_samples_survey)
N_samples_high = len(dist_samples_high)
N_total = N_samples_survey + N_samples_high
if (N_samples_survey==0)&(N_samples_high!=0):
return 1./(mhigh-mlow)
elif (N_samples_survey!=0)&(N_samples_high==0):
return bn.total_count((1./(
bn.vectorisation(
self.M_to_m)(M, dist_samples_survey) -mlow))*\
| bn.vectorisation(expit_func) | numpy.vectorize |
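# --- Usage sketch (added for illustration; not part of the original module) ---
# How the Kilonova_Inference class above might be instantiated, following its
# own docstring. The linear light-curve parameters and the flat null-event pdf
# below are illustrative assumptions only.
#
#   band_model = lambda t: -16. + 0.5 * (t - 0.)        # M(t) for one band
#   kne = Kilonova_Inference(lc_model_funcs=[band_model],
#                            nullevent_mlim_pdf=lambda mlim: 1.0)
#   m_app = kne.M_to_m(-16., distance=100.)             # apparent mag at 100 Mpc
#   d_lim = kne.dlim(mlim=21.0, M=-16.)                 # limiting distance (Mpc)
#   s = kne.calc_expit_argument(d_lim, maglim_err=0.1)  # smoothed mlim selection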
"""
Created on Mon Aug 25 13:17:03 2014
@author: anthony
"""
import time
from multiprocessing import Pool
import matplotlib.pyplot as plt
import beatnum as bn
import scipy.interpolate as interp
from .cp_tools import cp_loglikelihood
from .cp_tools import cp_loglikelihood_proj
from .cp_tools import cp_model
from .cp_tools import mas2rad
from .cp_tools import project_cps
from .cp_tools import rad2mas
def phase_binary_flux(u, v, wavel, p, return_cvis=False):
"""Calculate the phases observed by an numset on a binary star
----------------------------------------------------------------
p: 3-component vector (+2 optional), the binary "parameters":
- p[0] = sep (mas)
- p[1] = PA (deg) E of N.
- p[2] = flux (primary is assumed to be 1)
optional:
- p[2:] = contrast ratio for several wavelengths that we want
to calculate the cps over
- u,v: baseline coordinates (meters)
- wavel: wavelength (meters)
----------------------------------------------------------------"""
p = bn.numset(p)
# relative locations
th = (p[1] + 90.0) * bn.pi / 180.0
ddec = mas2rad(p[0] * bn.sin(th))
dra = -mas2rad(p[0] * bn.cos(th))
# decompose into two "luminosities"
# but first, a little trick so this works whether
# p is a single value or a list of contrasts
spec = p[2:]
if len(spec) == 1:
spec = spec[0]
l2 = spec
l1 = 1 - l2
# phase-factor
output_shape = list(u.shape)
output_shape[-1] = bn.size(wavel)
phi = bn.zeros(output_shape, dtype=complex)
phi.reality = bn.cos(-2 * bn.pi * (u * dra + v * ddec) / wavel)
phi.imaginary = bn.sin(-2 * bn.pi * (u * dra + v * ddec) / wavel)
cvis = l1 + l2 * phi
phase = bn.angle(cvis, deg=True)
if return_cvis:
return cvis
else:
return bn.mod(phase + 10980.0, 360.0) - 180.0
# =========================================================================
def cp_model_flux(params, u, v, wavels, model="constant"):
"""Function to model closure phases. Takes a parameter list, u,v triangles and range of wavelengths.
Allows fitting of a model to contrast vs wavelength.
Models for contrast ratio:
constant (contrast is constant with wavelength, default)
linear (params[2,3]=contrast ratios at end wavelengths),
free (params[2:]=contrast ratios).
ndof (the wavelength channels are evenly spaced cubic interpolations in params[2:])
polynomial (of the form Sum[n] params[n+2]*(wavelength*1e6)**n )
NOTE: This doesn't allow for nonzero size of each component!"""
nwav = wavels.size
if model == "constant":
cons = | bn.duplicate(params[2], nwav) | numpy.repeat |
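# --- Usage sketch (added for illustration; not part of the original module) ---
# phase_binary_flux above expects u, v baseline arrays whose last axis matches
# the number of wavelength channels. The baselines, wavelength and binary
# parameters below are illustrative values only.
#
#   u = bn.numset([[10.], [25.], [40.]])     # metres, shape (n_baselines, n_wav)
#   v = bn.numset([[5.], [-12.], [30.]])
#   wavel = bn.numset([1.6e-6])              # observing wavelength (m)
#   p = [40., 120., 0.01]                    # sep (mas), PA (deg E of N), secondary flux
#   phases = phase_binary_flux(u, v, wavel, p)   # degrees, shape (3, 1)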