python_code (stringlengths 0 to 1.02M) | repo_name (stringlengths 9 to 48) | file_path (stringlengths 5 to 114)
---|---|---|
| bolt-master | experiments/python/datasets/__init__.py |
#!/usr/bin/env python
import os
import numpy as np
from joblib import Memory
import pandas as pd
from . import paths
_memory = Memory('.', verbose=1, compress=9)
UCR_DATASETS_DIR = paths.UCR
UCR_INFO_PATH = paths.UCR_INFO
# ================================================================
# Public
# ================================================================
def all_ucr_datasets():
for dataDir in sorted(all_ucr_dataset_dirs()):
yield UCRDataset(dataDir)
class UCRDataset(object):
def __init__(self, dataset_dir, sep='\t', precondition=True, znorm=True):
self.name = name_from_dir(dataset_dir)
self.X_train, y_train = read_ucr_train_data(dataset_dir, sep=sep)
self.X_test, y_test = read_ucr_test_data(dataset_dir, sep=sep)
# self.y_train = y_train
# self.y_test = y_test
all_lbls = np.r_[y_train, y_test]
uniq_lbls = np.unique(all_lbls)
new_lbls = np.argsort(uniq_lbls) # same if labels are 0..(nclasses-1)
mapping = dict(zip(uniq_lbls, new_lbls))
self.y_train = np.array([mapping[lbl] for lbl in y_train])
self.y_test = np.array([mapping[lbl] for lbl in y_test])
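# e.g. original labels {-1, 1} become {0, 1} and labels {3, 5, 7} become {0, 1, 2},
# so downstream code can always assume labels 0..(nclasses-1)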
# self.nclasses = len(uniq_lbls)
# MelbournePedestrian has nans, even though not in missing data list
for X in (self.X_train, self.X_test):
for d in range(X.shape[1]):
col = X[:, d]
nan_idxs = np.isnan(col)
if nan_idxs.sum() > 0:
# print("self.name: ", self.name)
# print("original number of nans: ", np.sum(nan_idxs))
# X[nan_idxs, d] = col.mean()
fillval = np.nanmedian(col)
if np.isnan(fillval):
# handle all-nan cols, which happens in Crop
fillval = np.nanmedian(X)
col[nan_idxs] = fillval
# np.nan_to_num(col, copy=False, nan=np.median(col))
# print("new number of nans: ", np.isnan(X[:, d]).sum())
# print("new number of nans: ", np.isnan(col).sum())
if znorm:
self.X_train -= self.X_train.mean(axis=1, keepdims=True)
self.X_test -= self.X_test.mean(axis=1, keepdims=True)
eps = 1e-20
self.X_train *= 1 / (self.X_train.std(axis=1, keepdims=True) + eps)
self.X_test *= 1 / (self.X_test.std(axis=1, keepdims=True) + eps)
elif precondition:
# weaker than znormalization since one offset and scale applied
# to all dims and all samples in both train and test sets; this
# is basically just here because the values in MelbournePedestrian
# are huge and screw up numerical algorithms
self.orig_mean = np.mean(self.X_train)
self.X_train -= self.orig_mean
self.X_test -= self.orig_mean
self.orig_std = np.std(self.X_train)
self.X_train /= self.orig_std
self.X_test /= self.orig_std
assert len(self.X_train) == len(self.y_train)
assert len(self.X_test) == len(self.y_test)
# if self.name == 'MelbournePedestrian':
# print("I am MelbournePedestrian!")
# print('new labels: ', new_lbls)
# print("X_train num nans", np.sum(np.isnan(self.X_train)))
# print("X_test num nans", np.sum(np.isnan(self.X_test)))
# # import sys; sys.exit()
# if self.name == 'Wafer':
# print("original uniq labels train", np.unique(self.y_train))
# print("original uniq labels test", np.unique(self.y_test))
def all_ucr_dataset_dirs():
return _ucr_datasets_in_dir(UCR_DATASETS_DIR)
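# A minimal usage sketch (illustrative; assumes paths.UCR points at a local copy of
# the UCR archive and that a 'GunPoint' directory exists inside it):
#
#   dset = UCRDataset(dir_from_name('GunPoint'))
#   print(dset.name, dset.X_train.shape, dset.X_test.shape, len(np.unique(dset.y_train)))
#
#   for dset in all_ucr_datasets():
#       print(dset.name, dset.X_train.shape)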
# ================================================================
# Private
# ================================================================
def _ucr_datasets_in_dir(dirpath):
datasetsPath = os.path.expanduser(dirpath)
files = os.listdir(datasetsPath)
rm_dir = 'Missing_value_and_variable_length_datasets_adjusted'
if rm_dir in files:
files.remove(rm_dir)
for i in range(len(files)):
files[i] = os.path.join(datasetsPath, files[i])
dirs = list(filter(os.path.isdir, files))
return dirs
@_memory.cache
def _readtxt(path, sep=None):
return np.genfromtxt(path, delimiter=sep).astype(np.float32)
def read_data_file(path, sep=None, mean_norm=False):
D = _readtxt(path, sep=sep)
labels = D[:, 0].astype(int)
X = D[:, 1:]
if mean_norm:
X -= np.mean(X, axis=1, keepdims=True)
return (X, labels)
def name_from_dir(datasetDir):
return os.path.basename(datasetDir)
def dir_from_name(datasetName):
return os.path.join(paths.UCR, datasetName)
def read_ucr_data_in_dir(datasetDir, train, sep=None):
datasetName = name_from_dir(datasetDir)
if train:
fileName = datasetName + "_TRAIN.tsv"
else:
fileName = datasetName + "_TEST.tsv"
filePath = os.path.join(datasetDir, fileName)
return read_data_file(filePath, sep=sep)
def read_ucr_train_data(datasetDir, sep=None):
return read_ucr_data_in_dir(datasetDir, train=True, sep=sep)
def read_ucr_test_data(datasetDir, sep=None):
return read_ucr_data_in_dir(datasetDir, train=False, sep=sep)
# combines train and test data
def read_all_ucr_data(ucrDatasetDir):
X_train, Y_train = read_ucr_train_data(ucrDatasetDir)
X_test, Y_test = read_ucr_test_data(ucrDatasetDir)
X = np.r_[X_train, X_test]
Y = np.r_[Y_train, Y_test]
return X, Y
@_memory.cache
def load_ucr_dset_stats():
df = pd.read_csv(UCR_INFO_PATH)
df['l2-1nn-acc'] = 1. - df['ED (w=0)']
return df
# ================================================================ Main
@_memory.cache
def _load_ucr_stats_df():
stats = []
for i, datasetDir in enumerate(all_ucr_dataset_dirs()):
# Xtrain, _ = read_ucr_train_data(datasetDir)
# Xtest, Ytest = read_ucr_test_data(datasetDir)
dset = UCRDataset(datasetDir)
N, D = dset.X_train.shape
M, D = dset.X_test.shape
nclasses = len(np.unique(dset.y_test))
stats.append({'Dataset': dset.name, 'N': N, 'D': D, 'M': M,
'nclasses': nclasses})
# print('%30s:\t%d\t%d\t%d\t%d' % (name_from_dir(datasetDir),
# N, M, D, nclasses)
return pd.DataFrame.from_records(stats)
def main():
# dsets = all_ucr_datasets()
# for dset in dsets:
# print("loaded ucr dset:", dset.name)
# # return
df = _load_ucr_stats_df()
# df = df.sort_values(axis=1)
# df = df.loc[df['N'] > 100]
# df = df.loc[df['M'] > 100]
print("ucr dset stats:")
# print(df['M'].sort_values(ascending=False))
print("number of dsets:", df.shape[0])
print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
print("mean, median length: ", df['D'].mean(), df['D'].median())
mvals = df['M'].to_numpy()
mvals = np.sort(mvals)
length = len(mvals)
total_sizes = np.array([m * (length - i) for i, m in enumerate(mvals)])
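# total_sizes[i] = mvals[i] * (number of datasets whose test set has >= mvals[i] rows);
# the argmax below presumably picks the M cutoff that maximizes the total matrix size
# if every kept dataset is capped at that many test rows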
max_idx = np.argmax(total_sizes)
best_m_cutoff = mvals[max_idx]
print("best num dsets, m, sz = ",
length - max_idx, best_m_cutoff, total_sizes[max_idx])
print("type of mvals: ", type(mvals))
for cutoff in [100, 200, 256, 300, 400, 500, 512, 1000]:
ndsets = (mvals >= cutoff).sum()
total_sz = total_sizes[ndsets-1]
print(f"n >= {cutoff}: {ndsets} dsets, total sz = {total_sz}")
# import matplotlib.pyplot as plt
# xvals = length - np.arange(length)
# # xvals = np.arange(length)
# # plt.plot(xvals, total_sizes[::-1])
# plt.plot(xvals, total_sizes)
# plt.plot(xvals, mvals)
# plt.show()
# df = df.loc[df['M'] >= best_m_cutoff]
# print("---- after cutting off M to maximize mat sizes:")
df = df.loc[df['N'] >= 128]
print("---- after cutting off N to number of centroids:")
print("number of dsets: ", len(df))
print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
print("mean, median length: ", df['D'].mean(), df['D'].median())
print("mean, median nclasses: ", df['nclasses'].mean(), df['nclasses'].median())
print("min, max nclasses: ", df['nclasses'].min(), df['nclasses'].max())
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
main()
| bolt-master | experiments/python/datasets/ucr.py |
#!/bin/env python
from __future__ import absolute_import, division, print_function
from scipy import io
import numpy as np
import os
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR = '../datasets/svhn'
TRAIN_PATH = os.path.join(DATADIR, 'train_32x32.mat')
TEST_PATH = os.path.join(DATADIR, 'test_32x32.mat')
EXTRA_PATH = os.path.join(DATADIR, 'extra_32x32.mat')
def extract_data_from_mat_file(path):
matlab_dict = io.loadmat(path)
X, y = matlab_dict['X'], matlab_dict['y'].ravel()
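# the cropped-digit .mat files store images as a (32, 32, 3, N) array;
# move the sample axis to the front to get the usual (N, 32, 32, 3) layout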
X = np.transpose(X, (3, 0, 1, 2))
# make classes be 0-9 instead of 1-10; this way the classes line up
# with the actual digits
y[y == 10] = 0
assert len(y.shape) == 1
assert X.shape[0] == len(y)
assert X.shape[1] == 32
assert X.shape[2] == 32
assert X.shape[-1] == 3
return X, y
@_memory.cache
def load_data():
X_train, y_train = extract_data_from_mat_file(TRAIN_PATH)
X_test, y_test = extract_data_from_mat_file(TEST_PATH)
return (X_train, y_train), (X_test, y_test)
def load_extra_data():
return extract_data_from_mat_file(EXTRA_PATH)
def main():
import matplotlib.pyplot as plt
(X_train, y_train), (X_test, y_test) = load_data()
# hacky way to visualize extra data using same code
# X_extra, y_extra = load_extra_data()
# X_train, X_test = X_extra, X_extra
# y_train, y_test = y_extra, y_extra
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
X = X_test if i % 2 else X_train
y = y_test if i % 2 else y_train
idx = np.random.choice(X.shape[0])
ax.imshow(X[idx])
ax.set_title("class = {}".format(y[idx]))
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| bolt-master | experiments/python/datasets/svhn.py |
#!/bin/env python
"""utility functions for data munging"""
from __future__ import absolute_import, division, print_function
import numpy as np
import sklearn.model_selection
def split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
np.random.seed(123)
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, random_state=random_state)
def stratified_split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, stratify=Y, random_state=random_state)
| bolt-master | experiments/python/datasets/data_utils.py |
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', verbose=0)
DATA_DIR = paths.SHAREE_ECG
NUM_RECORDINGS = 139
NUM_CHANNELS = 3
RAW_DTYPE = np.uint16
# RAW_DTYPE = np.int16
SAMPLES_PER_SEC = 128
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
fpaths = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
assert len(fpaths) == NUM_RECORDINGS
return fpaths
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
# dtype = np.float32 if dtype is None else dtype
path = os.path.join(DATA_DIR, rec_id)
a = np.fromfile(path, dtype=RAW_DTYPE)
assert len(a) % NUM_CHANNELS == 0
a = a.reshape(-1, NUM_CHANNELS) # looks like it's rowmajor
# a = a.reshape(NUM_CHANNELS, -1).T # is colmajor clearly wrong? EDIT: yes
if limit_nhours and limit_nhours > 0:
a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
a = a[SAMPLES_PER_MIN:] # often a bunch of garbage at the beginning
a = a.astype(dtype)
# small amount of smoothing since heavily oversampled + noisy
# filt = np.hamming(5).astype(np.float32)
filt = np.hamming(5).astype(np.float32)
filt /= np.sum(filt)
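# normalizing the taps to sum to 1 gives the filter unit DC gain,
# so the smoothing does not change the local signal level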
for j in range(a.shape[1]):
a[:, j] = np.convolve(a[:, j], filt, mode='same')
return a
# def load_recordings(generator=False, plot=False, **kwargs):
def load_recordings(plot=False, **kwargs):
rec_ids = load_recording_ids()
recs = []
for i, rec_id in enumerate(rec_ids):
print("loading rec id: ", rec_id)
rec = load_recording(rec_id, **kwargs)
recs.append(rec)
if plot:
if i < 5:
offset = SAMPLES_PER_MIN
a = rec[offset:(offset + 1000)]
print('about to plot recording', rec_id)
plt.figure(figsize=(9, 7))
plt.plot(a)
plt.show()
else:
return
return recs
if __name__ == '__main__':
# print("done")
print("about to call load_recordings")
load_recordings(plot=True)
# print("rec ids: ", load_recording_ids())
print("called load_recordings")
| bolt-master | experiments/python/datasets/sharee.py |
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', verbose=0)
DATA_DIR = paths.INCART_ECG
NUM_RECORDINGS = 75
NUM_CHANNELS = 12
RAW_DTYPE = np.int16
SAMPLES_PER_SEC = 257
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
fpaths = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
assert len(fpaths) == NUM_RECORDINGS
return fpaths
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
path = os.path.join(DATA_DIR, rec_id)
a = np.fromfile(path, dtype=RAW_DTYPE)
assert len(a) % NUM_CHANNELS == 0
a = a.reshape(-1, NUM_CHANNELS) # yep, clearly rowmajor when plotted
if limit_nhours and limit_nhours > 0:
a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
a = a[SAMPLES_PER_MIN:] # often a bunch of garbage at the beginning
a = a.astype(dtype)
a -= a.mean(axis=0) # just so r_sq values are more meaningful
# small amount of smoothing since heavily oversampled + noisy
# filt = np.hamming(5).astype(np.float32)
# filt = np.hamming(5).astype(np.float32)
# filt /= np.sum(filt)
# for j in range(a.shape[1]):
# a[:, j] = np.convolve(a[:, j], filt, mode='same')
return a
def load_recordings(plot=False, **kwargs):
rec_ids = load_recording_ids()
recs = []
for i, rec_id in enumerate(rec_ids):
print("loading rec id: ", rec_id)
rec = load_recording(rec_id, **kwargs)
recs.append(rec)
if plot:
if i < 5:
offset = 0
a = rec[offset:(offset + 1000)]
print("plotting recording {} with shape: {}".format(
rec_id, rec.shape))
plt.figure(figsize=(9, 7))
plt.plot(a)
plt.show()
else:
return
return recs
if __name__ == '__main__':
print("about to call load_recordings")
load_recordings(plot=True)
print("called load_recordings")
| bolt-master | experiments/python/datasets/incart.py |
#!/bin/env python
# from __future__ import absolute_import, division, print_function
from __future__ import division, print_function
import numpy as np
from . import paths
from . import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR_101 = paths.CALTECH_101
DATADIR_256 = paths.CALTECH_256
# _DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center', verbose=2)
_DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center')
_CALTECH_101_KWARGS = dict(
dirpath=DATADIR_101, remove_classes='BACKGROUND_Google')
_CALTECH_256_KWARGS = dict(
dirpath=DATADIR_256, remove_classes='257.clutter')
@_memory.cache
def load_caltech101(**kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
return imgs.load_jpegs_from_dir(**_CALTECH_101_KWARGS, **kwargs)
@_memory.cache
def load_caltech256(**kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
return imgs.load_jpegs_from_dir(**_CALTECH_256_KWARGS, **kwargs)
@_memory.cache
def load_caltech101_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_101_KWARGS, only_return_path=True, **kwargs)
@_memory.cache
def load_caltech256_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_256_KWARGS, only_return_path=True, **kwargs)
# @_memory.cache
def load_caltech_img(img_id, **kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
path = img_id # load_jpegs_from_dir returns abs path as id
return imgs.load_jpg(path, **kwargs).astype(np.float32)
# img = imgs.load_jpg(path, **kwargs).astype(np.float32)
# print("img.shape", img.shape)
# assert img.shape[:2] == (224, 224)
# return img
def main():
import matplotlib.pyplot as plt
# caltech 101
(X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_101, remove_classes='BACKGROUND_Google')
# DATADIR_101, remove_classes='BACKGROUND_Google', crop='center')
DATADIR_101, remove_classes='BACKGROUND_Google', pad='square')
# # DATADIR_101, remove_classes='BACKGROUND_Google', resample=(224, 224))
# caltech 256
# (X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_256, remove_classes='257.clutter', verbose=2)
if isinstance(X, np.ndarray):
print("X shape: ", X.shape)
else:
print("X is a list of length", len(X))
print("X[0] has shape: ", X[0].shape)
print("y shape: ", y.shape)
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
idx = np.random.choice(len(X))
ax.imshow(X[idx])
label = label2cls[y[idx]]
ax.set_title(label)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| bolt-master | experiments/python/datasets/caltech.py |
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
from python import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR_101 = '../datasets/caltech/101_ObjectCategories'
def main():
import matplotlib.pyplot as plt
# caltech 101
(X, y), label2cls = imgs.load_jpegs_from_dir(
# TODO
)
if isinstance(X, np.ndarray):
print("X shape: ", X.shape)
else:
print("X is a list of length", len(X))
print("X[0] has shape: ", X[0].shape)
print("y shape: ", y.shape)
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
idx = np.random.choice(len(X))
ax.imshow(X[idx])
label = label2cls[y[idx]]
ax.set_title(label)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| bolt-master | experiments/python/datasets/flowers.py |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
from sklearn.datasets import load_digits
import timeit
import bolt
# ================================================================ utils
def _dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def _dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _element_size_bytes(x):
return np.dtype(x.dtype).itemsize
def _corr(x, y):
x, y = x.astype(np.float64), y.astype(np.float64)
x = x.ravel() - np.mean(x)
y = y.ravel() - np.mean(y)
r = np.mean(x * y) / (np.std(x) * np.std(y))
assert -1.00001 <= r <= 1.00001
return r
def _sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
Q = queries.shape[0]
mat_size = X.shape[0] * Q
mat_size_bytes = mat_size * _element_size_bytes(X[0] + queries[0])
if mat_size_bytes > int(1e9):
print("WARNING: _sq_dists_to_vectors: attempting to create a matrix "
"of size {} ({}B)".format(mat_size, mat_size_bytes))
if rowNorms is None:
rowNorms = np.sum(X * X, axis=1, keepdims=True)
if queryNorms is None:
queryNorms = np.sum(queries * queries, axis=1)
dotProds = np.dot(X, queries.T)
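# squared Euclidean distances via ||x - q||^2 = ||x||^2 - 2*x.q + ||q||^2;
# rowNorms broadcasts down the columns and queryNorms across the rows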
return (-2 * dotProds) + rowNorms + queryNorms # len(X) x len(queries)
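# np.argpartition with an array of kth values guarantees each listed position holds
# its correct order statistic, so the k indices returned below are already ordered
# (ascending distance for smaller_better, best-first otherwise)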
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
if smaller_better: # return indices of lowest elements
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn, axis=axis)[:k]
else: # return indices of highest elements
which_nn = (elements.shape[axis] - 1 - np.arange(k))[::-1]
# print "elements.shape", elements.shape
# print "using which_nn: ", which_nn
return np.argpartition(elements, kth=which_nn, axis=axis)[-k:][::-1]
def _knn(X, Q, k=1000, print_every=5, block_sz=128):
nqueries = Q.shape[0]
nblocks = int(np.ceil(nqueries / float(block_sz)))
truth = np.full((nqueries, k), -999, dtype=np.int32)
if nqueries <= block_sz:
dists = _sq_dists_to_vectors(Q, X)
assert dists.shape == (Q.shape[0], X.shape[0])
for i in range(nqueries):
truth[i, :] = top_k_idxs(dists[i, :], k)
return truth
for b in range(nblocks):
# recurse to fill in knn for each block
start = b * block_sz
end = min(start + block_sz, nqueries)
rows = Q[start:end, :]
truth[start:end, :] = _knn(X, rows, k=k, block_sz=block_sz)
if b % print_every == 0:
print("computing top k for query block " \
"{} (queries {}-{})...".format(b, start, end))
assert np.all(truth != -999)
return truth
def _create_randn_encoder(Ntrain=100, Ntest=20, D=64):
enc = bolt.Encoder()
X_train = np.random.randn(Ntrain, D)
X_test = np.random.randn(Ntest, D)
enc.fit(X_train, just_train=True)
enc.set_data(X_test)
return enc
# ================================================================ tests
def test_smoketest():
"""Test that `bolt.Encoder`'s methods don't crash"""
D = 64
enc = _create_randn_encoder(D=D)
Nqueries = 5
Q = np.random.randn(Nqueries, D)
[enc.transform(q) for q in Q]
for k in [1, 3]:
[enc.knn(q, k) for q in Q]
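# The calls above are the core Bolt workflow exercised throughout these tests:
# fit() learns the quantization codebooks (and, judging by the other tests, also
# encodes X unless just_train=True), set_data() encodes a database, and
# transform(q) / knn(q, k) answer queries against the encoded data.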
def _fmt_float(x):
return '{}.'.format(int(x)) if x == int(x) else '{:.3f}'.format(x)
def _load_digits_X_Q(nqueries):
X, _ = load_digits(return_X_y=True)
return X[:-nqueries], X[-nqueries:] # X, Q
def test_time_space_savings(): # mostly to verify readme code
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
X, Q = _load_digits_X_Q(nqueries)
enc = bolt.Encoder(accuracy='lowest', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
# massive space savings
print("original space usage: {}B".format(X.nbytes)) # 1777 * 64 * 8B = 909KB
print("bolt space usage: {}B".format(enc.nbytes)) # 1777 * 2B = 3.55KB
# massive time savings (~10x here, but often >100x on larger datasets
# with less Python overhead; see the Bolt paper)
t_np = timeit.Timer(lambda: [np.dot(X, q) for q in Q]).timeit(5) # ~8ms
t_bolt = timeit.Timer(lambda: [enc.transform(q) for q in Q]).timeit(5) # ~800us
print("Numpy / BLAS time, Bolt time: {:.3f}ms, {:.3f}ms".format(
t_np * 1000, t_bolt * 1000))
def test_unquantize():
X, Q = _load_digits_X_Q(nqueries=20)
enc = bolt.Encoder('dot', accuracy='high').fit(X)
dots_true = [np.dot(X, q) for q in Q]
dots_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dots_true, dots_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
var = np.mean([np.var(true_vals) for true_vals in dots_true])
print("dot product unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
# print "true, bolt dot prods"
# print dots_true[0][:20].astype(np.int32)
# print dots_bolt[0][:20].astype(np.int32)
enc = bolt.Encoder('l2', accuracy='high').fit(X)
dists_true = [_dists_sq(X, q) for q in Q]
dists_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dists_true, dists_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
var = np.mean([np.var(true_vals) for true_vals in dists_true])
print("squared l2 unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
def test_basic():
# np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
# nqueries = 10
# nqueries = 3
X, Q = _load_digits_X_Q(nqueries)
# TODO rm this block
# shift = 100.
# shift = 100
# scaleby = 1.
# scaleby = 3.5 # acc goes to **** at accelerating rate as this gets larger...
# scaleby = 4
# scaleby = 1.0
# X, Q = X + shift, Q + shift
# X, Q = X * scaleby, Q * scaleby
# X = X[:200]
# X = X[:50]
# X = X[:20]
# X, _ = load_digits(return_X_y=True)
# Q = X[-nqueries:]
# X = X[:-nqueries]
# print "X.shape", X.shape
# print "X nbytes", X.nbytes
# ------------------------------------------------ squared l2
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.SQUARED_EUCLIDEAN)
enc.fit(X)
l2_corrs = np.empty(len(Q))
for i, q in enumerate(Q):
l2_true = _dists_sq(X, q).astype(int)
l2_bolt = enc.transform(q)
l2_corrs[i] = _corr(l2_true, l2_bolt)
if i == nqueries - 1:
print("l2 true: ", l2_true)
print("l2 bolt: ", l2_bolt)
print("corr: ", l2_corrs[i])
mean_l2 = np.mean(l2_corrs)
std_l2 = np.std(l2_corrs)
assert mean_l2 > .95
print("--> squared l2 dist correlation: {} +/- {}".format(mean_l2, std_l2))
# return
# ------------------------------------------------ dot product
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
dot_corrs = np.empty(nqueries)
for i, q in enumerate(Q):
dots_true = np.dot(X, q)
dots_bolt = enc.transform(q)
dot_corrs[i] = _corr(dots_true, dots_bolt)
mean_dot = np.mean(dot_corrs)
std_dot = np.std(dot_corrs)
assert mean_dot > .95
print("--> dot product correlation: {} +/- {}".format(mean_dot, std_dot))
# ------------------------------------------------ l2 knn
enc = bolt.Encoder(accuracy='low', reduction='l2')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_knn = _knn(X, Q, k_true)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
contained = np.empty((nqueries, k_bolt), dtype=bool)
for i in range(nqueries):
true_neighbors = true_knn[i]
bolt_neighbors = bolt_knn[i]
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> l2 knn precision@{}: {}".format(k_bolt, precision))
assert precision > .6
# # print "true_knn, bolt_knn:"
# # print true_knn[:20, :20]
# # print bolt_knn[:20]
# ------------------------------------------------ dot knn
enc = bolt.Encoder(accuracy='low', reduction='dot')
# enc = bolt.Encoder(accuracy='high', reduction='dot')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_dists = np.dot(X, Q.T)
# true_dists = [np.dot(X, q) for q in Q]
true_knn = np.empty((nqueries, k_true), dtype=np.int64)
for i in range(nqueries):
true_knn[i, :] = top_k_idxs(
true_dists[:, i], k_true, smaller_better=False)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
contained = np.empty((len(Q), k_bolt), dtype=bool)
for i in range(len(Q)):
true_neighbors = true_knn[i]
# bolt_dists = enc.transform(Q[i])
# bolt_neighbors = top_k_idxs(bolt_dists, k_bolt, smaller_better=True)
bolt_neighbors = bolt_knn[i] # TODO uncomment
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> max inner product knn precision@{}: {}".format(
k_bolt, precision))
assert precision > .6
# print("true_knn, bolt_knn:")
# print(true_knn[:5])
# print(bolt_knn[:5])
if __name__ == '__main__':
test_basic()
| bolt-master | tests/test_encoder.py |
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: [email protected] (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import os
import sys
exclusions = set(['relicense.py'])
def update(text):
if text.find(lgpl3_header) == -1:
return text, False
return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
if basename in exclusions:
print 'SKIPPED', basename
continue
filename = os.path.join(root, basename)
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
| bolt-master | cpp/src/external/eigen/scripts/relicense.py |
# Intentionally empty
| bolt-master | cpp/src/external/eigen/debug/gdb/__init__.py |
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support any of the other eigen
# types, such as quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re
import itertools
class EigenMatrixPrinter:
"Print Eigen Matrix or Array of some kind"
def __init__(self, variety, val):
"Extract all the necessary information"
# Save the variety (presumably "Matrix" or "Array") for later usage
self.variety = variety
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
tag = self.type.tag
regex = re.compile('\<.*\>')
m = regex.findall(tag)[0][1:-1]
template_params = m.split(',')
template_params = [x.replace(" ", "") for x in template_params]
if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
self.rows = val['m_storage']['m_rows']
else:
self.rows = int(template_params[1])
if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1':
self.cols = val['m_storage']['m_cols']
else:
self.cols = int(template_params[2])
self.options = 0 # default value
if len(template_params) > 3:
self.options = template_params[3];
self.rowMajor = (int(self.options) & 0x1)
self.innerType = self.type.template_argument(0)
self.val = val
# Fixed size matrices have a struct as their storage, so we need to walk through this
self.data = self.val['m_storage']['m_data']
if self.data.type.code == gdb.TYPE_CODE_STRUCT:
self.data = self.data['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
def __init__ (self, rows, cols, dataPtr, rowMajor):
self.rows = rows
self.cols = cols
self.dataPtr = dataPtr
self.currentRow = 0
self.currentCol = 0
self.rowMajor = rowMajor
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
row = self.currentRow
col = self.currentCol
if self.rowMajor == 0:
if self.currentCol >= self.cols:
raise StopIteration
self.currentRow = self.currentRow + 1
if self.currentRow >= self.rows:
self.currentRow = 0
self.currentCol = self.currentCol + 1
else:
if self.currentRow >= self.rows:
raise StopIteration
self.currentCol = self.currentCol + 1
if self.currentCol >= self.cols:
self.currentCol = 0
self.currentRow = self.currentRow + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
if (self.cols == 1): #if it's a column vector
return ('[%d]' % (row,), item)
elif (self.rows == 1): #if it's a row vector
return ('[%d]' % (col,), item)
return ('[%d,%d]' % (row, col), item)
def children(self):
return self._iterator(self.rows, self.cols, self.data, self.rowMajor)
def to_string(self):
return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenQuaternionPrinter:
"Print an Eigen Quaternion"
def __init__(self, val):
"Extract all the necessary information"
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
self.innerType = self.type.template_argument(0)
self.val = val
# Quaternions have a struct as their storage, so we need to walk through this
self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
def __init__ (self, dataPtr):
self.dataPtr = dataPtr
self.currentElement = 0
self.elementNames = ['x', 'y', 'z', 'w']
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
element = self.currentElement
if self.currentElement >= 4: #there are 4 elements in a quaternion
raise StopIteration
self.currentElement = self.currentElement + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
return ('[%s]' % (self.elementNames[element],), item)
def children(self):
return self._iterator(self.data)
def to_string(self):
return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val)
pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val)
pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = lambda val: EigenMatrixPrinter("Array", val)
def register_eigen_printers(obj):
"Register eigen pretty-printers with objfile Obj"
if obj == None:
obj = gdb
obj.pretty_printers.append(lookup_function)
def lookup_function(val):
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
for function in pretty_printers_dict:
if function.search(typename):
return pretty_printers_dict[function](val)
return None
pretty_printers_dict = {}
build_eigen_dictionary ()
| bolt-master | cpp/src/external/eigen/debug/gdb/printers.py |
from setuptools import setup, find_packages
setup(
name = 'attention-tensorflow-mesh',
packages = find_packages(),
version = '0.0.2',
license='MIT',
description = 'A bunch of attention related functions, for constructing transformers in tensorflow mesh',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/attention-tensorflow-mesh',
keywords = ['transformers', 'artificial intelligence'],
install_requires=[
'mesh-tensorflow',
'tensorflow-gpu>=1.15'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| attention-tensorflow-mesh-master | setup.py |
from attention_tensorflow_mesh.attention_tensorflow_mesh import transformer_lm, transformer, attention
| attention-tensorflow-mesh-master | attention_tensorflow_mesh/__init__.py |
import math
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
# helpers
def default(val, d):
return val if val is not None else d
# simple linear layer
def linear(x, dim_out, scope = 'linear', bias = True):
with tf.variable_scope(scope):
*_, dim_in = x.shape
w_init_stdev = 1 / math.sqrt(dim_in.size)
return mtf.layers.dense(x, new_dims=[dim_out], reduced_dims=[dim_in], name=scope, use_bias=bias,
kernel_initializer=tf.random_normal_initializer(stddev=w_init_stdev, dtype=tf.float32))
# norm
def norm(x, axis = None, epsilon=1e-5):
axis = default(axis, x.shape[-1])
u = mtf.reduce_mean(x, reduced_dim=axis)
s = mtf.reduce_mean(mtf.square(x - u), reduced_dim=axis)
u = mtf.broadcast(u, x.shape)
s = mtf.broadcast(s, x.shape)
return (x - u) * mtf.rsqrt(s + epsilon)
def scale_norm(x, scope, *, axis=None, epsilon=1e-5, params=None):
if axis is None:
axis = x.shape[-1]
with tf.variable_scope(scope):
n_state = x.shape[-1]
dt = tf.float32
g = mtf.get_variable(x.mesh, 'g', [], initializer=tf.constant_initializer(1, dtype=dt), dtype=dt)
x = norm(x, axis, epsilon)
x = x * g
return x
def prenorm(fn, scope):
def inner(x, *args, **kwargs):
return fn(scale_norm(x, scope), *args, **kwargs)
return inner
def residual(fn):
def inner(x, *args, **kwargs):
return fn(x, *args, **kwargs) + x
return inner
# full multi-head attention
def attention(x, dim_head, dim_features_head, scope = 'attn', causal = False):
with tf.variable_scope(scope):
mesh, batch, seq, dim = x.mesh, *x.shape
dim_heads = mtf.Dimension('dim_heads', dim_head.size * dim_features_head.size)
dim_intermediate = mtf.Dimension('qkv_dimension', dim_heads.size * 3)
qkv = linear(x, dim_intermediate, bias = False, scope='to_qkv')
q, k, v = mtf.split(qkv, dim_intermediate, 3)
q, k, v = map(lambda t: mtf.reshape(t, [batch, seq, dim_head, dim_features_head]), (q, k, v))
q, k, v = map(lambda t: mtf.transpose(t, [batch, dim_head, seq, dim_features_head]), (q, k, v))
k, v = map(lambda t: mtf.rename_dimension(t, seq.name, 'memory_length'), (k, v))
mem_len_dim = v.shape[-2]
dots = mtf.layers.us_einsum([q, k], [batch, dim_head, seq, mem_len_dim])
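# note: these attention logits are raw q.k dot products; the common
# 1/sqrt(dim_features_head) scaling is not applied here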
if causal:
i = mtf.range(mesh, seq, tf.int32)
j = mtf.range(mesh, mem_len_dim, tf.int32)
i, j = map(lambda t: mtf.broadcast(t, [seq, mem_len_dim]), (i, j))
mask = mtf.less(i + mem_len_dim.size - seq.size, j)
mask = mtf.cast(mask, tf.float32) * -1e10
dots += mask
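# positions j that lie in the future of query position i (after the memory-length
# offset) get -1e10 added to their logits, so the softmax gives them ~zero weight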
attn = mtf.softmax(dots, mem_len_dim)
out = mtf.einsum([attn, v], [batch, dim_head, seq, dim_features_head])
out = mtf.transpose(out, [batch, seq, dim_head, dim_features_head])
out = mtf.reshape(out, [batch, seq, dim_heads])
combined_out = linear(out, dim, scope='combine_output')
return combined_out
# feed forward
def ff(x, mult = 4, scope = 'ff'):
*_, dim = x.shape
with tf.variable_scope(scope):
dim_intermediate = mtf.Dimension('ff_intermediate', dim.size * mult)
h = linear(x, dim_intermediate, scope='w1')
h = mtf.gelu(h)
h = linear(h, dim, scope='w2')
return h
# block
def transformer(x, *, depth, dim_head, dim_features_head, causal = False):
attn_fn = residual(prenorm(attention, 'norm1'))
ff_fn = residual(prenorm(ff, 'norm2'))
for i in range(depth):
with tf.variable_scope(f'layer_{i}'):
x = attn_fn(x, dim_head, dim_features_head, causal = causal)
x = ff_fn(x)
return x
# language model
def transformer_lm(x, *, dim, num_tokens, depth, max_seq_len, dim_head, dim_features_head, causal = False):
mesh, batch, seq_dim = x.mesh, *x.shape
dim = mtf.Dimension('dim', dim)
dim_head = mtf.Dimension('dim_head', dim_head)
dim_features_head = mtf.Dimension('dim_features_head', dim_features_head)
dim_num_tokens = mtf.Dimension('vocab_size', num_tokens)
dim_max_seq_len = mtf.Dimension('max_seq_len', max_seq_len)
wte = mtf.get_variable(mesh, name='wte', shape=mtf.Shape([dim_num_tokens, dim]), dtype=tf.float32)
wpe = mtf.get_variable(mesh, name='wpe', shape=mtf.Shape([seq_dim, dim]), dtype=tf.float32)
x = mtf.gather(wte, x, dim_num_tokens)
p = mtf.gather(wpe, mtf.range(mesh, seq_dim, dtype=tf.int32), dim_max_seq_len)
x = x + p
x = transformer(x, depth = depth, dim_head = dim_head, dim_features_head = dim_features_head, causal = causal)
logits = linear(x, dim_num_tokens, scope='to_logits')
return logits
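# A rough usage sketch (illustrative sizes; assumes a single-process mesh and that
# the caller lowers the graph as usual with mesh-tensorflow):
#
#   graph = mtf.Graph()
#   mesh = mtf.Mesh(graph, 'my_mesh')
#   batch = mtf.Dimension('batch', 1)
#   seq = mtf.Dimension('sequence', 1024)
#   tokens = mtf.import_tf_tensor(
#       mesh, tf.zeros([1, 1024], dtype=tf.int32), shape=mtf.Shape([batch, seq]))
#   logits = transformer_lm(tokens, dim=512, num_tokens=20000, depth=6,
#                           max_seq_len=1024, dim_head=8, dim_features_head=64,
#                           causal=True)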
| attention-tensorflow-mesh-master | attention_tensorflow_mesh/attention_tensorflow_mesh.py |