from charformer_pytorch.charformer_pytorch import GBST
|
import math
from math import gcd
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def lcm(*numbers):
    return functools.reduce(lambda x, y: (x * y) // gcd(x, y), numbers, 1)
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def next_divisible_length(seqlen, multiple):
return math.ceil(seqlen / multiple) * multiple
def pad_to_multiple(tensor, multiple, *, seq_dim, dim = -1, value = 0.):
seqlen = tensor.shape[seq_dim]
length = next_divisible_length(seqlen, multiple)
if length == seqlen:
return tensor
remainder = length - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
# helper classes
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size):
super().__init__()
self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, groups = dim_in)
self.proj_out = nn.Conv1d(dim_out, dim_out, 1)
def forward(self, x):
x = self.conv(x)
return self.proj_out(x)
# main class
class GBST(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_block_size = None,
blocks = None,
downsample_factor = 4,
score_consensus_attn = True
):
super().__init__()
        assert exists(max_block_size) ^ exists(blocks), 'either max_block_size or blocks must be given on initialization'
self.token_emb = nn.Embedding(num_tokens, dim)
if exists(blocks):
assert isinstance(blocks, tuple), 'blocks must be a tuple of block sizes'
self.blocks = tuple(map(lambda el: el if isinstance(el, tuple) else (el, 0), blocks))
        assert all([(offset < block_size) for block_size, offset in self.blocks]), 'offset must always be smaller than the block size'
max_block_size = max(list(map(lambda t: t[0], self.blocks)))
else:
self.blocks = tuple(map(lambda el: (el, 0), range(1, max_block_size + 1)))
self.pos_conv = nn.Sequential(
Pad((0, 0, 0, max_block_size - 1)),
Rearrange('b n d -> b d n'),
DepthwiseConv1d(dim, dim, kernel_size = max_block_size),
Rearrange('b d n -> b n d')
)
self.score_fn = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... () -> ...')
)
self.score_consensus_attn = score_consensus_attn
        assert downsample_factor <= max_block_size, 'downsample factor must not exceed the maximum block size'
self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])
self.downsample_factor = downsample_factor
def forward(self, x, mask = None):
b, n, block_mult, ds_factor, device = *x.shape, self.block_pad_multiple, self.downsample_factor, x.device
m = next_divisible_length(n, ds_factor)
# get character token embeddings
x = self.token_emb(x)
# do a conv to generate the positions for the tokens
x = self.pos_conv(x)
        # pad both sequence and mask to a length divisible by all block sizes
x = pad_to_multiple(x, block_mult, seq_dim = 1, dim = -2)
if exists(mask):
mask = pad_to_multiple(mask, block_mult, seq_dim = 1, dim = -1, value = False)
# compute representations for all blocks by mean pooling
block_masks = []
block_reprs = []
for block_size, offset in self.blocks:
# clone the input sequence as well as the mask, in order to pad for offsets
block_x = x.clone()
if exists(mask):
block_mask = mask.clone()
# pad for offsets, if needed
need_padding = offset > 0
if need_padding:
left_offset, right_offset = (block_size - offset), offset
block_x = F.pad(block_x, (0, 0, left_offset, right_offset), value = 0.)
if exists(mask):
block_mask = F.pad(block_mask, (left_offset, right_offset), value = False)
# group input sequence into blocks
blocks = rearrange(block_x, 'b (n m) d -> b n m d', m = block_size)
# either mean pool the blocks, or do a masked mean
if exists(mask):
mask_blocks = rearrange(block_mask, 'b (n m) -> b n m', m = block_size)
block_repr = masked_mean(blocks, mask_blocks, dim = -2)
else:
block_repr = blocks.mean(dim = -2)
# append the block representations, as well as the pooled block masks
block_repr = repeat(block_repr, 'b n d -> b (n m) d', m = block_size)
if need_padding:
block_repr = block_repr[:, left_offset:-right_offset]
block_reprs.append(block_repr)
if exists(mask):
mask_blocks = torch.any(mask_blocks, dim = -1)
mask_blocks = repeat(mask_blocks, 'b n -> b (n m)', m = block_size)
if need_padding:
mask_blocks = mask_blocks[:, left_offset:-right_offset]
block_masks.append(mask_blocks)
# stack all the block representations
block_reprs = torch.stack(block_reprs, dim = 2)
# calculate scores and softmax across the block size dimension
scores = self.score_fn(block_reprs)
if exists(mask):
block_masks = torch.stack(block_masks, dim = 2)
max_neg_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~block_masks, max_neg_value)
scores = scores.softmax(dim = 2)
# do the cheap consensus attention, eq (5) in paper
if self.score_consensus_attn:
score_sim = einsum('b i d, b j d -> b i j', scores, scores)
if exists(mask):
cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
max_neg_value = -torch.finfo(score_sim.dtype).max
score_sim = score_sim.masked_fill(~cross_mask, max_neg_value)
score_attn = score_sim.softmax(dim = -1)
scores = einsum('b i j, b j m -> b i m', score_attn, scores)
# multiply the block representations by the position-wise scores
scores = rearrange(scores, 'b n m -> b n m ()')
x = (block_reprs * scores).sum(dim = 2)
# truncate to length divisible by downsample factor
x = x[:, :m]
if exists(mask):
mask = mask[:, :m]
# final mean pooling downsample
x = rearrange(x, 'b (n m) d -> b n m d', m = ds_factor)
if exists(mask):
mask = rearrange(mask, 'b (n m) -> b n m', m = ds_factor)
x = masked_mean(x, mask, dim = 2)
mask = torch.any(mask, dim = -1)
else:
x = x.mean(dim = -2)
return x, mask
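# Usage sketch, not part of the upstream file: byte-level ids go in and a 4x
# downsampled sequence of latent subword embeddings comes out.
def _example_gbst():
    tokenizer = GBST(
        num_tokens = 257,              # 256 byte values + 1 special token
        dim = 512,
        max_block_size = 4,
        downsample_factor = 4,
        score_consensus_attn = True
    )
    ids = torch.randint(0, 257, (1, 1023))
    mask = torch.ones(1, 1023).bool()
    embeds, mask = tokenizer(ids, mask = mask)  # shapes: (1, 256, 512), (1, 256)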
|
"""
Bonito Aligner
"""
from threading import Thread
from functools import partial
from mappy import Aligner, ThreadBuffer
from bonito.multiprocessing import ThreadMap, ProcessMap
def align_map(aligner, sequences, n_thread=4):
"""
Align `sequences` with minimap using `n_thread` threads.
"""
return ThreadMap(partial(MappyWorker, aligner), sequences, n_thread)
class MappyWorker(Thread):
"""
    Thread that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, aligner, input_queue=None, output_queue=None):
super().__init__()
self.aligner = aligner
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
thrbuf = ThreadBuffer()
while True:
item = self.input_queue.get()
if item is StopIteration:
self.output_queue.put(item)
break
k, v = item
mapping = next(self.aligner.map(v['sequence'], buf=thrbuf, MD=True), None)
self.output_queue.put((k, {**v, 'mapping': mapping}))
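# Usage sketch, not part of the upstream file; 'ref.mmi' is a hypothetical
# minimap2 index and the items are (key, dict) pairs carrying a 'sequence'.
def _example_align_map():
    aligner = Aligner('ref.mmi', preset='map-ont')
    reads = iter([('read_1', {'sequence': 'ACGTACGTACGT'})])
    for key, result in align_map(aligner, reads, n_thread=2):
        print(key, result['mapping'])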
|
"""
Bonito Fast5 Utils
"""
import sys
from glob import glob
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from itertools import chain, starmap
import torch
import numpy as np
from scipy.signal import find_peaks
from ont_fast5_api.fast5_interface import get_fast5_file
class Read:
def __init__(self, read, filename):
self.read_id = read.read_id
self.filename = filename.name
self.run_id = read.get_run_id()
if type(self.run_id) in (bytes, np.bytes_):
self.run_id = self.run_id.decode()
read_attrs = read.handle[read.raw_dataset_group_name].attrs
channel_info = read.handle[read.global_key + 'channel_id'].attrs
self.offset = int(channel_info['offset'])
self.sampling_rate = channel_info['sampling_rate']
self.scaling = channel_info['range'] / channel_info['digitisation']
self.mux = read_attrs['start_mux']
self.channel = channel_info['channel_number']
if type(self.channel) in (bytes, np.bytes_):
self.channel = self.channel.decode()
self.start = read_attrs['start_time'] / self.sampling_rate
self.duration = read_attrs['duration'] / self.sampling_rate
raw = read.handle[read.raw_dataset_name][:]
scaled = np.array(self.scaling * (raw + self.offset), dtype=np.float32)
trim_start, _ = trim(scaled[:8000])
scaled = scaled[trim_start:]
self.template_start = self.start + (1 / self.sampling_rate) * trim_start
self.template_duration = self.duration - (1 / self.sampling_rate) * trim_start
if len(scaled) > 8000:
med, mad = med_mad(scaled)
self.signal = (scaled - med) / mad
else:
self.signal = norm_by_noisiest_section(scaled)
def __repr__(self):
return "Read('%s')" % self.read_id
class ReadChunk:
def __init__(self, read, chunk, i, n):
self.read_id = "%s:%i:%i" % (read.read_id, i, n)
self.run_id = read.run_id
self.filename = read.filename
self.mux = read.mux
self.channel = read.channel
self.start = read.start
self.duration = read.duration
self.template_start = self.start
self.template_duration = self.duration
self.signal = chunk
def __repr__(self):
return "ReadChunk('%s')" % self.read_id
def trim(signal, window_size=40, threshold_factor=2.4, min_elements=3):
min_trim = 10
signal = signal[min_trim:]
med, mad = med_mad(signal[-(window_size*100):])
threshold = med + mad * threshold_factor
num_windows = len(signal) // window_size
seen_peak = False
for pos in range(num_windows):
start = pos * window_size
end = start + window_size
window = signal[start:end]
if len(window[window > threshold]) > min_elements or seen_peak:
seen_peak = True
if window[-1] > threshold:
continue
return min(end + min_trim, len(signal)), len(signal)
return min_trim, len(signal)
def med_mad(x, factor=1.4826):
"""
Calculate signal median and median absolute deviation
"""
med = np.median(x)
mad = np.median(np.absolute(x - med)) * factor
return med, mad
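# Worked example, not part of the upstream file: median/mad is a robust
# location/scale estimate, so a single outlier barely shifts the normalisation.
def _example_med_mad():
    x = np.array([1.0, 2.0, 2.0, 3.0, 100.0])
    med, mad = med_mad(x)         # med == 2.0, mad == 1.0 * 1.4826
    print((x - med) / mad)        # core values land near unit scale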
def norm_by_noisiest_section(signal, samples=100, threshold=6.0):
"""
Normalise using the medmad from the longest continuous region where the
noise is above some threshold relative to the std of the full signal.
"""
threshold = signal.std() / threshold
noise = np.ones(signal.shape)
for idx in np.arange(signal.shape[0] // samples):
window = slice(idx * samples, (idx + 1) * samples)
noise[window] = np.where(signal[window].std() > threshold, 1, 0)
# start and end low for peak finding
noise[0] = 0; noise[-1] = 0
peaks, info = find_peaks(noise, width=(None, None))
if len(peaks):
widest = np.argmax(info['widths'])
med, mad = med_mad(signal[info['left_bases'][widest]: info['right_bases'][widest]])
else:
med, mad = med_mad(signal)
return (signal - med) / mad
def read_chunks(read, chunksize=4000, overlap=400):
"""
    Split a Read into fixed-sized ReadChunks
"""
if len(read.signal) < chunksize:
return
_, offset = divmod(len(read.signal) - chunksize, chunksize - overlap)
signal = torch.from_numpy(read.signal[offset:])
blocks = signal.unfold(0, chunksize, chunksize - overlap)
for i, block in enumerate(blocks):
yield ReadChunk(read, block.numpy(), i+1, blocks.shape[0])
def get_raw_data(filename, read_ids=None, skip=False):
"""
Get the raw signal and read id from the fast5 files
"""
with get_fast5_file(filename, 'r') as f5_fh:
for read_id in f5_fh.get_read_ids():
if read_ids is None or (read_id in read_ids) ^ skip:
yield Read(f5_fh.get_read(read_id), filename)
def get_read_ids(filename, read_ids=None, skip=False):
"""
Get all the read_ids from the file `filename`.
"""
with get_fast5_file(filename, 'r') as f5_fh:
ids = [(filename, rid) for rid in f5_fh.get_read_ids()]
if read_ids is None:
return ids
return [rid for rid in ids if (rid[1] in read_ids) ^ skip]
def get_raw_data_for_read(info):
"""
Get the raw signal from the fast5 file for a given filename, read_id pair
"""
filename, read_id = info
with get_fast5_file(filename, 'r') as f5_fh:
return Read(f5_fh.get_read(read_id), filename)
def get_reads(directory, read_ids=None, skip=False, max_read_size=0, n_proc=1, recursive=False, cancel=None):
"""
Get all reads in a given `directory`.
"""
pattern = "**/*.fast5" if recursive else "*.fast5"
get_filtered_reads = partial(get_read_ids, read_ids=read_ids, skip=skip)
with Pool(n_proc) as pool:
for job in chain(pool.imap(get_filtered_reads, (Path(x) for x in glob(directory + "/" + pattern, recursive=True)))):
for read in pool.imap(get_raw_data_for_read, job):
if max_read_size > 0 and len(read.signal) > max_read_size:
sys.stderr.write(
"> skipping long read %s (%s samples)\n" % (read.read_id, len(read.signal))
)
continue
yield read
if cancel is not None and cancel.is_set():
return
|
"""
Bonito utils
"""
import os
import re
import sys
import random
from glob import glob
from itertools import groupby
from operator import itemgetter
from importlib import import_module
from collections import deque, defaultdict, OrderedDict
import toml
import torch
import parasail
import numpy as np
from torch.cuda import get_device_capability
try:
from claragenomics.bindings import cuda
from claragenomics.bindings.cudapoa import CudaPoaBatch
except ImportError:
pass
__dir__ = os.path.dirname(os.path.realpath(__file__))
__data__ = os.path.join(__dir__, "data")
__models__ = os.path.join(__dir__, "models")
__configs__ = os.path.join(__dir__, "models/configs")
split_cigar = re.compile(r"(?P<len>\d+)(?P<op>\D+)")
default_data = os.path.join(__data__, "dna_r9.4.1")
default_config = os.path.join(__configs__, "[email protected]")
def init(seed, device):
"""
Initialise random libs and setup cudnn
https://pytorch.org/docs/stable/notes/randomness.html
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device == "cpu": return
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(torch.cuda.is_available())
def permute(x, input_layout, output_layout):
"""
Permute `x` from `input_layout` to `output_layout`
>>> permute(x, 'TNC', 'NTC')
"""
if input_layout == output_layout: return x
return x.permute(*[input_layout.index(x) for x in output_layout])
def concat(xs, dim=0):
"""
Type agnostic concat.
"""
if isinstance(xs[0], torch.Tensor):
return torch.cat(xs, dim=dim)
elif isinstance(xs[0], np.ndarray):
return np.concatenate(xs, axis=dim)
elif isinstance(xs[0], list):
return [x for l in xs for x in l]
elif isinstance(xs[0], str):
return ''.join(xs)
elif isinstance(xs[0], dict):
return {k: concat([x[k] for x in xs], dim) for k in xs[0].keys()}
else:
raise TypeError
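# Quick sketch, not part of the upstream file: concat dispatches on the
# element type, so strings, lists and arrays all work.
def _example_concat():
    assert concat(['ab', 'cd']) == 'abcd'
    assert concat([[1], [2, 3]]) == [1, 2, 3]
    assert np.array_equal(concat([np.ones(2), np.zeros(2)]), np.array([1., 1., 0., 0.]))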
def select_range(x, start, end, dim=0):
"""
Type agnostic range select.
"""
if isinstance(x, dict):
return {k: select_range(v, start, end, dim) for (k, v) in x.items()}
if dim == 0 or isinstance(x, list): return x[start:end]
return x[(*(slice(None),)*dim, slice(start, end))]
def size(x, dim=0):
"""
Type agnostic size.
"""
if hasattr(x, 'shape'):
return x.shape[dim]
elif dim == 0:
return len(x)
raise TypeError
def half_supported():
"""
    Returns whether FP16 is supported on the GPU
"""
try:
return get_device_capability()[0] >= 7
    except Exception:
return False
def phred(prob, scale=1.0, bias=0.0):
"""
    Converts `prob` into an ASCII-encoded phred quality score between 0 and 40.
"""
p = max(1 - prob, 1e-4)
q = -10 * np.log10(p) * scale + bias
return chr(int(np.round(q) + 33))
def mean_qscore_from_qstring(qstring):
"""
Convert qstring into a mean qscore
"""
if len(qstring) == 0: return 0.0
err_probs = [10**((ord(c) - 33) / -10) for c in qstring]
mean_err = np.mean(err_probs)
return -10 * np.log10(max(mean_err, 1e-4))
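# Sanity-check sketch, not part of the upstream file: probability 0.99 is Q20,
# encoded as chr(20 + 33) == '5', and the round trip recovers the mean qscore.
def _example_phred():
    assert phred(0.99) == '5'
    assert abs(mean_qscore_from_qstring('5555') - 20.0) < 1e-6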
def decode_ref(encoded, labels):
"""
    Convert an integer-encoded reference into a string and remove blanks
"""
return ''.join(labels[e] for e in encoded if e)
def column_to_set(filename, idx=0, skip_header=False):
"""
Pull a column from a file and return a set of the values.
"""
if filename and os.path.isfile(filename):
with open(filename, 'r') as tsv:
if skip_header:
next(tsv)
return {line.strip().split()[idx] for line in tsv.readlines()}
def chunk(signal, chunksize, overlap):
"""
    Convert a read into overlapping chunks before basecalling
"""
T = signal.shape[0]
if chunksize == 0:
chunks = signal[None, :]
elif T < chunksize:
chunks = torch.nn.functional.pad(signal, (chunksize - T, 0))[None, :]
else:
stub = (T - overlap) % (chunksize - overlap)
chunks = signal[stub:].unfold(0, chunksize, chunksize - overlap)
if stub > 0:
chunks = torch.cat([signal[None, :chunksize], chunks], dim=0)
return chunks.unsqueeze(1)
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
"""
Stitch chunks together with a given overlap
"""
if chunks.shape[0] == 1: return chunks.squeeze(0)
semi_overlap = overlap // 2
start, end = semi_overlap // stride, (chunksize - semi_overlap) // stride
stub = (length - overlap) % (chunksize - overlap)
first_chunk_end = (stub + semi_overlap) // stride if (stub > 0) else end
if reverse:
chunks = list(chunks)
return concat([
chunks[-1][:-start], *(x[-end:-start] for x in reversed(chunks[1:-1])), chunks[0][-first_chunk_end:]
])
else:
return concat([
chunks[0, :first_chunk_end], *chunks[1:-1, start:end], chunks[-1, start:]
])
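# Round-trip sketch, not part of the upstream file: with stride=1 the raw
# chunks produced by `chunk` stitch back into the original signal exactly.
def _example_chunk_stitch():
    signal = torch.arange(1000, dtype=torch.float32)
    chunked = chunk(signal, chunksize=400, overlap=100)   # (3, 1, 400)
    restored = stitch(chunked.squeeze(1), 400, 100, length=1000, stride=1)
    assert torch.equal(restored, signal)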
def batchify(items, batchsize, dim=0):
"""
    Batch up items into chunks of at most `batchsize`.
"""
stack, pos = [], 0
for k, v in items:
breaks = range(batchsize - pos, size(v, dim), batchsize)
for start, end in zip([0, *breaks], [*breaks, size(v, dim)]):
sub_batch = select_range(v, start, end, dim)
stack.append(((k, (pos, pos + end - start)), sub_batch))
if pos + end - start == batchsize:
ks, vs = zip(*stack)
yield ks, concat(vs, dim)
stack, pos = [], 0
else:
pos += end - start
if len(stack):
ks, vs = zip(*stack)
yield ks, concat(vs, dim)
def unbatchify(batches, dim=0):
"""
Reconstruct batches.
"""
batches = (
(k, select_range(v, start, end, dim))
for sub_batches, v in batches
for k, (start, end) in sub_batches
)
return (
(k, concat([v for (k, v) in group], dim))
for k, group in groupby(batches, itemgetter(0))
)
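# Round-trip sketch, not part of the upstream file: keyed arrays are split
# into fixed-size batches and unbatchify reassembles them per key.
def _example_batchify_roundtrip():
    items = [('a', np.arange(5)), ('b', np.arange(7))]
    batches = list(batchify(items, batchsize=4))
    restored = dict(unbatchify(batches))
    assert np.array_equal(restored['a'], np.arange(5))
    assert np.array_equal(restored['b'], np.arange(7))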
def load_data(limit=None, directory=None):
"""
Load the training data
"""
if directory is None:
directory = default_data
chunks = np.load(os.path.join(directory, "chunks.npy"), mmap_mode='r')
targets = np.load(os.path.join(directory, "references.npy"), mmap_mode='r')
lengths = np.load(os.path.join(directory, "reference_lengths.npy"), mmap_mode='r')
indices = os.path.join(directory, "indices.npy")
if os.path.exists(indices):
idx = np.load(indices, mmap_mode='r')
idx = idx[idx < lengths.shape[0]]
if limit:
idx = idx[:limit]
return chunks[idx, :], targets[idx, :], lengths[idx]
if limit:
chunks = chunks[:limit]
targets = targets[:limit]
lengths = lengths[:limit]
return np.array(chunks), np.array(targets), np.array(lengths)
def load_symbol(config, symbol):
"""
    Dynamically load a symbol from the module specified in the model config.
"""
if not isinstance(config, dict):
if not os.path.isdir(config) and os.path.isdir(os.path.join(__models__, config)):
dirname = os.path.join(__models__, config)
else:
dirname = config
config = toml.load(os.path.join(dirname, 'config.toml'))
imported = import_module(config['model']['package'])
return getattr(imported, symbol)
def match_names(state_dict, model):
keys_and_shapes = lambda state_dict: zip(*[
(k, s) for s, i, k in sorted([(v.shape, i, k)
for i, (k, v) in enumerate(state_dict.items())])
])
k1, s1 = keys_and_shapes(state_dict)
k2, s2 = keys_and_shapes(model.state_dict())
assert s1 == s2
remap = dict(zip(k1, k2))
return OrderedDict([(k, remap[k]) for k in state_dict.keys()])
def load_model(dirname, device, weights=None, half=None, chunksize=0):
"""
Load a model from disk
"""
if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
dirname = os.path.join(__models__, dirname)
if not weights: # take the latest checkpoint
weight_files = glob(os.path.join(dirname, "weights_*.tar"))
if not weight_files:
raise FileNotFoundError("no model weights found in '%s'" % dirname)
weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
device = torch.device(device)
config = toml.load(os.path.join(dirname, 'config.toml'))
weights = os.path.join(dirname, 'weights_%s.tar' % weights)
Model = load_symbol(config, "Model")
model = Model(config)
state_dict = torch.load(weights, map_location=device)
state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.', '')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if half is None:
half = half_supported()
if half: model = model.half()
model.eval()
model.to(device)
return model
def parasail_to_sam(result, seq):
"""
Extract reference start and sam compatible cigar string.
:param result: parasail alignment result.
:param seq: query sequence.
:returns: reference start coordinate, cigar string.
"""
cigstr = result.cigar.decode.decode()
first = re.search(split_cigar, cigstr)
first_count, first_op = first.groups()
prefix = first.group()
rstart = result.cigar.beg_ref
cliplen = result.cigar.beg_query
clip = '' if cliplen == 0 else '{}S'.format(cliplen)
if first_op == 'I':
pre = '{}S'.format(int(first_count) + cliplen)
elif first_op == 'D':
pre = clip
rstart = int(first_count)
else:
pre = '{}{}'.format(clip, prefix)
mid = cigstr[len(prefix):]
end_clip = len(seq) - result.end_query - 1
suf = '{}S'.format(end_clip) if end_clip > 0 else ''
new_cigstr = ''.join((pre, mid, suf))
return rstart, new_cigstr
def accuracy(ref, seq, balanced=False, min_coverage=0.0):
"""
Calculate the accuracy between `ref` and `seq`
"""
alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
counts = defaultdict(int)
q_coverage = len(alignment.traceback.query) / len(seq)
r_coverage = len(alignment.traceback.ref) / len(ref)
if r_coverage < min_coverage:
return 0.0
_, cigar = parasail_to_sam(alignment, seq)
for count, op in re.findall(split_cigar, cigar):
counts[op] += int(count)
if balanced:
accuracy = (counts['='] - counts['I']) / (counts['='] + counts['X'] + counts['D'])
else:
accuracy = counts['='] / (counts['='] + counts['I'] + counts['X'] + counts['D'])
return accuracy * 100
def print_alignment(ref, seq):
"""
Print the alignment between `ref` and `seq`
"""
alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
print(alignment.traceback.ref)
print(alignment.traceback.comp)
print(alignment.traceback.query)
print(" Score=%s" % alignment.score)
return alignment.score
def poa(groups, max_poa_sequences=100, gpu_mem_per_batch=0.9):
"""
Generate consensus for POA groups.
Args:
groups : A list of lists of sequences for which consensus is to be generated.
"""
free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
gpu_mem_per_batch *= free
batch = CudaPoaBatch(max_poa_sequences, gpu_mem_per_batch, stream=None, output_type="consensus")
results = []
for i, group in enumerate(groups, start=1):
group_status, seq_status = batch.add_poa_group(group)
# Once batch is full, run POA processing
if group_status == 1 or i == len(groups):
batch.generate_poa()
consensus, coverage, status = batch.get_consensus()
results.extend(consensus)
batch.reset()
group_status, seq_status = batch.add_poa_group(group)
return results
|
"""
Bonito nn modules.
"""
import torch
from torch import nn
from torch.nn import Module
from torch.nn.init import orthogonal_
layers = {}
def register(layer):
layer.name = layer.__name__.lower()
layers[layer.name] = layer
return layer
register(torch.nn.ReLU)
register(torch.nn.Tanh)
@register
class Swish(torch.nn.SiLU):
pass
@register
class Serial(torch.nn.Sequential):
def __init__(self, sublayers):
super().__init__(*sublayers)
def to_dict(self, include_weights=False):
return {
'sublayers': [to_dict(layer, include_weights) for layer in self._modules.values()]
}
@register
class Reverse(Module):
def __init__(self, sublayers):
super().__init__()
self.layer = Serial(sublayers) if isinstance(sublayers, list) else sublayers
def forward(self, x):
return self.layer(x.flip(0)).flip(0)
def to_dict(self, include_weights=False):
if isinstance(self.layer, Serial):
return self.layer.to_dict(include_weights)
else:
return {'sublayers': to_dict(self.layer, include_weights)}
@register
class Convolution(Module):
def __init__(self, insize, size, winlen, stride=1, padding=0, bias=True, activation=None):
super().__init__()
self.conv = torch.nn.Conv1d(insize, size, winlen, stride=stride, padding=padding, bias=bias)
self.activation = layers.get(activation, lambda: activation)()
def forward(self, x):
if self.activation is not None:
return self.activation(self.conv(x))
return self.conv(x)
def to_dict(self, include_weights=False):
res = {
"insize": self.conv.in_channels,
"size": self.conv.out_channels,
"bias": self.conv.bias is not None,
"winlen": self.conv.kernel_size[0],
"stride": self.conv.stride[0],
"padding": self.conv.padding[0],
"activation": self.activation.name if self.activation else None,
}
if include_weights:
res['params'] = {
'W': self.conv.weight, 'b': self.conv.bias if self.conv.bias is not None else []
}
return res
@register
class LinearCRFEncoder(Module):
def __init__(self, insize, n_base, state_len, bias=True, scale=None, activation=None, blank_score=None):
super().__init__()
self.n_base = n_base
self.state_len = state_len
self.blank_score = blank_score
size = (n_base + 1) * n_base**state_len if blank_score is None else n_base**(state_len + 1)
self.linear = torch.nn.Linear(insize, size, bias=bias)
self.activation = layers.get(activation, lambda: activation)()
self.scale = scale
def forward(self, x):
scores = self.linear(x)
if self.activation is not None:
scores = self.activation(scores)
if self.scale is not None:
scores = scores * self.scale
if self.blank_score is not None:
T, N, C = scores.shape
s = torch.tensor(self.blank_score, device=scores.device, dtype=scores.dtype)
            scores = torch.cat([s.expand(T, N, C//self.n_base, 1), scores.reshape(T, N, C//self.n_base, self.n_base)], dim=-1).reshape(T, N, -1)
return scores
def to_dict(self, include_weights=False):
res = {
'insize': self.linear.in_features,
'n_base': self.n_base,
'state_len': self.state_len,
'bias': self.linear.bias is not None,
'scale': self.scale,
'activation': self.activation.name if self.activation else None,
'blank_score': self.blank_score,
}
if include_weights:
res['params'] = {
'W': self.linear.weight, 'b': self.linear.bias
if self.linear.bias is not None else []
}
return res
@register
class SHA(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** -0.5
self.to_q = nn.Sequential(nn.Linear(dim, dim), nn.LayerNorm(dim))
def forward(self, x, kv):
x = x.transpose(0, 1)
kv = kv.transpose(0, 1)
q = self.to_q(x)
sim = torch.matmul(q, kv.transpose(-1, -2)) * self.scale
attn = sim.softmax(dim=-1)
out = torch.matmul(attn, kv)
return out.transpose(0, 1)
@register
class SHABlock(Module):
""" https://arxiv.org/abs/1911.11423 """
def __init__(self, dim, ff_mult=4):
super().__init__()
self.attn_query_norm = nn.LayerNorm(dim)
self.attn_kv_norm = nn.LayerNorm(dim)
self.attn = SHA(dim=dim)
self.ff_residual_norm = nn.LayerNorm(dim)
self.ff = Serial([
nn.LayerNorm(dim),
nn.Linear(dim, dim * ff_mult),
nn.GELU(),
nn.Linear(dim * ff_mult, dim),
])
def forward(self, x):
kv = self.attn_kv_norm(x)
x = self.attn_query_norm(x)
x = self.attn(x, kv) + x
x = self.ff(x) + self.ff_residual_norm(x)
return x
@register
class Permute(Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, x):
return x.permute(*self.dims)
def to_dict(self, include_weights=False):
return {'dims': self.dims}
def truncated_normal(size, dtype=torch.float32, device=None, num_resample=5):
x = torch.empty(size + (num_resample,), dtype=torch.float32, device=device).normal_()
i = ((x < 2) & (x > -2)).max(-1, keepdim=True)[1]
return torch.clamp_(x.gather(-1, i).squeeze(-1), -2, 2)
class RNNWrapper(Module):
def __init__(
self, rnn_type, *args, reverse=False, orthogonal_weight_init=True, disable_state_bias=True, bidirectional=False, **kwargs
):
super().__init__()
if reverse and bidirectional:
raise Exception("'reverse' and 'bidirectional' should not both be set to True")
self.reverse = reverse
self.rnn = rnn_type(*args, bidirectional=bidirectional, **kwargs)
self.init_orthogonal(orthogonal_weight_init)
self.init_biases()
if disable_state_bias: self.disable_state_bias()
def forward(self, x):
if self.reverse: x = x.flip(0)
y, h = self.rnn(x)
if self.reverse: y = y.flip(0)
return y
def init_biases(self, types=('bias_ih',)):
for name, param in self.rnn.named_parameters():
if any(k in name for k in types):
with torch.no_grad():
param.set_(0.5*truncated_normal(param.shape, dtype=param.dtype, device=param.device))
def init_orthogonal(self, types=True):
if not types: return
        if types is True: types = ('weight_ih', 'weight_hh')
for name, x in self.rnn.named_parameters():
if any(k in name for k in types):
for i in range(0, x.size(0), self.rnn.hidden_size):
orthogonal_(x[i:i+self.rnn.hidden_size])
def disable_state_bias(self):
for name, x in self.rnn.named_parameters():
if 'bias_hh' in name:
x.requires_grad = False
x.zero_()
@register
class LSTM(RNNWrapper):
def __init__(self, size, insize, bias=True, reverse=False):
super().__init__(torch.nn.LSTM, size, insize, bias=bias, reverse=reverse)
def to_dict(self, include_weights=False):
res = {
'size': self.rnn.hidden_size,
'insize': self.rnn.input_size,
'bias': self.rnn.bias,
'reverse': self.reverse,
}
if include_weights:
res['params'] = {
'iW': self.rnn.weight_ih_l0.reshape(4, self.rnn.hidden_size, self.rnn.input_size),
'sW': self.rnn.weight_hh_l0.reshape(4, self.rnn.hidden_size, self.rnn.hidden_size),
'b': self.rnn.bias_ih_l0.reshape(4, self.rnn.hidden_size)
}
return res
def to_dict(layer, include_weights=False):
if hasattr(layer, 'to_dict'):
return {'type': layer.name, **layer.to_dict(include_weights)}
return {'type': layer.name}
def from_dict(model_dict, layer_types=None):
model_dict = model_dict.copy()
if layer_types is None:
layer_types = layers
type_name = model_dict.pop('type')
typ = layer_types[type_name]
if 'sublayers' in model_dict:
sublayers = model_dict['sublayers']
model_dict['sublayers'] = [
from_dict(x, layer_types) for x in sublayers
] if isinstance(sublayers, list) else from_dict(sublayers, layer_types)
try:
layer = typ(**model_dict)
except Exception as e:
raise Exception(f'Failed to build layer of type {typ} with args {model_dict}') from e
return layer
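# Round-trip sketch, not part of the upstream file: any registered layer can
# be serialized with to_dict and rebuilt with from_dict.
def _example_layer_roundtrip():
    layer = Convolution(1, 16, 5, stride=2, activation='swish')
    spec = to_dict(layer)          # {'type': 'convolution', 'insize': 1, ...}
    rebuilt = from_dict(spec)
    assert isinstance(rebuilt, Convolution)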
|
"""
Bonito Input/Output
"""
import os
import sys
import csv
import pandas as pd
from warnings import warn
from threading import Thread
from logging import getLogger
from contextlib import contextmanager
from os.path import realpath, splitext, dirname
import numpy as np
from mappy import revcomp
import bonito
from bonito.cli.convert import typical_indices
logger = getLogger('bonito')
class CSVLogger:
def __init__(self, filename, sep=','):
self.filename = str(filename)
if os.path.exists(self.filename):
with open(self.filename) as f:
self.columns = csv.DictReader(f).fieldnames
else:
self.columns = None
self.fh = open(self.filename, 'a', newline='')
self.csvwriter = csv.writer(self.fh, delimiter=sep)
self.count = 0
def set_columns(self, columns):
if self.columns:
raise Exception('Columns already set')
self.columns = list(columns)
self.csvwriter.writerow(self.columns)
def append(self, row):
if self.columns is None:
self.set_columns(row.keys())
self.csvwriter.writerow([row.get(k, '-') for k in self.columns])
self.count += 1
if self.count > 100:
self.count = 0
self.fh.flush()
def close(self):
self.fh.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
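# Usage sketch, not part of the upstream file ('metrics.csv' is a stand-in
# path): rows are appended as dicts and the header comes from the first row.
def _example_csv_logger():
    with CSVLogger('metrics.csv') as log:
        log.append({'epoch': 1, 'loss': 0.42})
        log.append({'epoch': 2, 'loss': 0.38})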
@contextmanager
def devnull(*args, **kwds):
"""
    A context manager that redirects stdout & stderr to devnull.
"""
save_fds = [os.dup(1), os.dup(2)]
null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
try:
yield
finally:
os.dup2(save_fds[0], 1)
os.dup2(save_fds[1], 2)
for fd in null_fds + save_fds: os.close(fd)
def write_fasta(header, sequence, fd=sys.stdout):
"""
Write a fasta record to a file descriptor.
"""
fd.write(">%s\n" % header)
fd.write("%s\n" % sequence)
fd.flush()
def write_fastq(header, sequence, qstring, fd=sys.stdout):
"""
Write a fastq record to a file descriptor.
"""
fd.write("@%s\n" % header)
fd.write("%s\n" % sequence)
fd.write("+\n")
fd.write("%s\n" % qstring)
fd.flush()
def write_sam_header(aligner, fd=sys.stdout, sep='\t'):
"""
Write the SQ & PG sam headers to a file descriptor.
"""
fd.write('%s\n' % os.linesep.join([
sep.join([
'@SQ', 'SN:%s' % name, 'LN:%s' % len(aligner.seq(name))
]) for name in aligner.seq_names
]))
fd.write('%s\n' % sep.join([
'@PG',
'ID:bonito',
'PN:bonito',
'VN:%s' % bonito.__version__,
'CL:%s' % ' '.join(sys.argv),
]))
fd.flush()
def write_sam(read_id, sequence, qstring, mapping, fd=sys.stdout, unaligned=False, sep='\t'):
"""
Write a sam record to a file descriptor.
"""
if unaligned:
fd.write("%s\n" % sep.join(map(str, [
read_id, 4, '*', 0, 0, '*', '*', 0, 0, sequence, qstring, 'NM:i:0'
])))
else:
softclip = [
'%sS' % mapping.q_st if mapping.q_st else '',
mapping.cigar_str,
'%sS' % (len(sequence) - mapping.q_en) if len(sequence) - mapping.q_en else ''
]
fd.write("%s\n" % sep.join(map(str, [
read_id,
0 if mapping.strand == +1 else 16,
mapping.ctg,
mapping.r_st + 1,
mapping.mapq,
''.join(softclip if mapping.strand == +1 else softclip[::-1]),
'*', 0, 0,
sequence if mapping.strand == +1 else revcomp(sequence),
qstring,
'NM:i:%s' % mapping.NM,
'MD:Z:%s' % mapping.MD,
])))
fd.flush()
def summary_file():
"""
Return the filename to use for the summary tsv.
"""
stdout = realpath('/dev/fd/1')
if sys.stdout.isatty() or stdout.startswith('/proc'):
return 'summary.tsv'
return '%s_summary.tsv' % splitext(stdout)[0]
summary_field_names = [
'filename',
'read_id',
'run_id',
'channel',
'mux',
'start_time',
'duration',
'template_start',
'template_duration',
'sequence_length_template',
'mean_qscore_template',
#if alignment
'alignment_genome',
'alignment_genome_start',
'alignment_genome_end',
'alignment_strand_start',
'alignment_strand_end',
'alignment_direction',
'alignment_length',
'alignment_num_aligned',
'alignment_num_correct',
'alignment_num_insertions',
'alignment_num_deletions',
'alignment_num_substitutions',
'alignment_mapq',
'alignment_strand_coverage',
'alignment_identity',
'alignment_accuracy',
]
def summary_row(read, seqlen, qscore, alignment=False):
"""
Summary tsv row.
"""
fields = [
read.filename,
read.read_id,
read.run_id,
read.channel,
read.mux,
read.start,
read.duration,
read.template_start,
read.template_duration,
seqlen,
qscore,
]
if alignment:
ins = sum(count for count, op in alignment.cigar if op == 1)
dels = sum(count for count, op in alignment.cigar if op == 2)
subs = alignment.NM - ins - dels
length = alignment.blen
matches = length - ins - dels
correct = alignment.mlen
fields.extend([
alignment.ctg,
alignment.r_st,
alignment.r_en,
alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en,
alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st,
'+' if alignment.strand == +1 else '-',
length, matches, correct,
ins, dels, subs,
alignment.mapq,
(alignment.q_en - alignment.q_st) / seqlen,
correct / matches,
correct / length,
])
elif alignment is None:
fields.extend(
['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
)
return dict(zip(summary_field_names, fields))
duplex_summary_field_names = [
'filename_template',
'read_id_template',
'filename_complement',
'read_id_complement',
'run_id',
'channel_template',
'mux_template',
'channel_complement',
'mux_complement',
'sequence_length_duplex',
'mean_qscore_duplex',
#if alignment
'alignment_genome',
'alignment_genome_start',
'alignment_genome_end',
'alignment_strand_start',
'alignment_strand_end',
'alignment_direction',
'alignment_length',
'alignment_num_aligned',
'alignment_num_correct',
'alignment_num_insertions',
'alignment_num_deletions',
'alignment_num_substitutions',
'alignment_mapq',
'alignment_strand_coverage',
'alignment_identity',
'alignment_accuracy',
]
def duplex_summary_row(read_temp, comp_read, seqlen, qscore, alignment=False):
"""
Duplex summary tsv row.
"""
fields = [
read_temp.filename,
read_temp.read_id,
comp_read.filename,
comp_read.read_id,
read_temp.run_id,
read_temp.channel,
read_temp.mux,
comp_read.channel,
comp_read.mux,
seqlen,
qscore,
]
if alignment:
ins = sum(count for count, op in alignment.cigar if op == 1)
dels = sum(count for count, op in alignment.cigar if op == 2)
subs = alignment.NM - ins - dels
length = alignment.blen
matches = length - ins - dels
correct = alignment.mlen
fields.extend([
alignment.ctg,
alignment.r_st,
alignment.r_en,
alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en,
alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st,
'+' if alignment.strand == +1 else '-',
length, matches, correct,
ins, dels, subs,
alignment.mapq,
(alignment.q_en - alignment.q_st) / seqlen,
correct / matches,
correct / length,
])
elif alignment is None:
fields.extend(
['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
)
return dict(zip(duplex_summary_field_names, fields))
class Writer(Thread):
def __init__(self, iterator, aligner, fd=sys.stdout, fastq=False, duplex=False):
super().__init__()
self.fd = fd
self.log = []
self.fastq = fastq
self.duplex = duplex
self.aligner = aligner
self.iterator = iterator
self.write_headers()
def write_headers(self):
if self.aligner:
write_sam_header(self.aligner, fd=self.fd)
def run(self):
with CSVLogger(summary_file(), sep='\t') as summary:
for read, res in self.iterator:
seq = res['sequence']
qstring = res.get('qstring', '*')
mean_qscore = res.get('mean_qscore', 0.0)
mapping = res.get('mapping', False)
if self.duplex:
samples = len(read[0].signal) + len(read[1].signal)
read_id = '%s;%s' % (read[0].read_id, read[1].read_id)
else:
samples = len(read.signal)
read_id = read.read_id
if len(seq):
if self.aligner:
write_sam(read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
else:
if self.fastq:
write_fastq(read_id, seq, qstring, fd=self.fd)
else:
write_fasta(read_id, seq, fd=self.fd)
if self.duplex:
summary.append(duplex_summary_row(read[0], read[1], len(seq), mean_qscore, alignment=mapping))
else:
summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
self.log.append((read_id, samples))
else:
                    logger.warning("> skipping empty sequence %s", read_id)
class CTCWriter(Thread):
"""
CTC writer process that writes output numpy training data.
"""
def __init__(self, iterator, aligner, min_coverage, min_accuracy, fd=sys.stdout):
super().__init__()
self.fd = fd
self.log = []
self.aligner = aligner
self.iterator = iterator
self.min_coverage = min_coverage
self.min_accuracy = min_accuracy
self.write_headers()
def write_headers(self):
if self.aligner:
write_sam_header(self.aligner, fd=self.fd)
def run(self):
chunks = []
targets = []
lengths = []
with CSVLogger(summary_file(), sep='\t') as summary:
for read, ctc_data in self.iterator:
seq = ctc_data['sequence']
qstring = ctc_data['qstring']
mean_qscore = ctc_data['mean_qscore']
mapping = ctc_data.get('mapping', False)
self.log.append((read.read_id, len(read.signal)))
if len(seq) == 0 or mapping is None:
continue
cov = (mapping.q_en - mapping.q_st) / len(seq)
acc = mapping.mlen / mapping.blen
refseq = self.aligner.seq(mapping.ctg, mapping.r_st, mapping.r_en)
if acc < self.min_accuracy or cov < self.min_coverage or 'N' in refseq:
continue
write_sam(read.read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
if mapping.strand == -1:
refseq = revcomp(refseq)
target = [int(x) for x in refseq.translate({65: '1', 67: '2', 71: '3', 84: '4'})]
targets.append(target)
chunks.append(read.signal)
lengths.append(len(target))
if len(chunks) == 0:
sys.stderr.write("> no suitable ctc data to write\n")
return
chunks = np.array(chunks, dtype=np.float16)
targets_ = np.zeros((chunks.shape[0], max(lengths)), dtype=np.uint8)
for idx, target in enumerate(targets): targets_[idx, :len(target)] = target
lengths = np.array(lengths, dtype=np.uint16)
indices = np.random.permutation(typical_indices(lengths))
chunks = chunks[indices]
targets_ = targets_[indices]
lengths = lengths[indices]
summary = pd.read_csv(summary_file(), sep='\t')
summary.iloc[indices].to_csv(summary_file(), sep='\t', index=False)
output_directory = '.' if sys.stdout.isatty() else dirname(realpath('/dev/fd/1'))
np.save(os.path.join(output_directory, "chunks.npy"), chunks)
np.save(os.path.join(output_directory, "references.npy"), targets_)
np.save(os.path.join(output_directory, "reference_lengths.npy"), lengths)
sys.stderr.write("> written ctc training data\n")
sys.stderr.write(" - chunks.npy with shape (%s)\n" % ','.join(map(str, chunks.shape)))
sys.stderr.write(" - references.npy with shape (%s)\n" % ','.join(map(str, targets_.shape)))
sys.stderr.write(" - reference_lengths.npy shape (%s)\n" % ','.join(map(str, lengths.shape)))
def stop(self):
self.join()
|
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from bonito.cli import basecaller, train, evaluate, view, convert, download, export, duplex
modules = [
'basecaller', 'train', 'evaluate', 'view', 'convert', 'download', 'export', 'duplex',
]
__version__ = '0.4.0'
def main():
parser = ArgumentParser(
'bonito',
formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-v', '--version', action='version',
version='%(prog)s {}'.format(__version__)
)
subparsers = parser.add_subparsers(
title='subcommands', description='valid commands',
help='additional help', dest='command'
)
subparsers.required = True
for module in modules:
mod = globals()[module]
p = subparsers.add_parser(module, parents=[mod.argparser()])
p.set_defaults(func=mod.main)
args = parser.parse_args()
args.func(args)
|
"""
Bonito Multiprocessing
"""
import queue
from itertools import count
from threading import Thread
from functools import partial
from collections import deque
from signal import signal, SIGINT
from multiprocessing import Process, Queue, Event, Lock, cpu_count
def process_iter(iterator, maxsize=1):
"""
Take an iterator and run it on another process.
"""
return iter(ProcessIterator(iterator, maxsize=maxsize))
def thread_iter(iterator, maxsize=1):
"""
Take an iterator and run it on another thread.
"""
return iter(ThreadIterator(iterator, maxsize=maxsize))
def process_cancel():
"""
    Register a cancel event on SIGINT
"""
event = Event()
signal(SIGINT, lambda *a: event.set())
return event
def process_map(func, iterator, n_proc=4, maxsize=0):
"""
Take an `iterator` of key, value pairs and apply `func` to all values using `n_proc` processes.
"""
if n_proc == 0: return ((k, func(v)) for k, v in iterator)
return iter(ProcessMap(func, iterator, n_proc, output_queue=Queue(maxsize)))
def thread_map(func, iterator, n_thread=4, maxsize=2):
"""
Take an `iterator` of key, value pairs and apply `func` to all values using `n_thread` threads.
"""
if n_thread == 0: return ((k, func(v)) for k, v in iterator)
return iter(ThreadMap(partial(MapWorkerThread, func), iterator, n_thread, maxsize=maxsize))
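# Usage sketch, not part of the upstream file: keys pass through unchanged
# while values are mapped on worker threads; the round-robin queues in
# ThreadMap below preserve input order.
def _example_thread_map():
    items = ((i, i) for i in range(10))
    for key, squared in thread_map(lambda v: v * v, items, n_thread=4):
        print(key, squared)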
class BackgroundIterator:
"""
Runs an iterator in the background.
"""
def __init__(self, iterator, maxsize=10):
super().__init__()
self.iterator = iterator
self.queue = self.QueueClass(maxsize)
def __iter__(self):
self.start()
while True:
item = self.queue.get()
if item is StopIteration:
break
yield item
def run(self):
for item in self.iterator:
self.queue.put(item)
self.queue.put(StopIteration)
def stop(self):
self.join()
class ThreadIterator(BackgroundIterator, Thread):
"""
    Runs an iterator in a separate thread.
"""
QueueClass = queue.Queue
class ProcessIterator(BackgroundIterator, Process):
"""
Runs an iterator in a separate process.
"""
QueueClass = Queue
class MapWorker(Process):
"""
Process that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, func, input_queue, output_queue):
super().__init__()
self.func = func
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
item = self.input_queue.get()
if item is StopIteration:
break
k, v = item
self.output_queue.put((k, self.func(v)))
class ProcessMap(Thread):
def __init__(self, func, iterator, n_proc, output_queue=None):
super().__init__()
self.key_map = {}
self.iterator = iterator
self.work_queue = Queue(n_proc * 2)
self.output_queue = output_queue or Queue()
self.processes = [MapWorker(func, self.work_queue, self.output_queue) for _ in range(n_proc)]
def start(self):
for process in self.processes:
process.start()
super().start()
def run(self):
for (k, v) in self.iterator:
self.work_queue.put((id(k), v))
self.key_map[id(k)] = k
for _ in self.processes:
self.work_queue.put(StopIteration)
for process in self.processes:
process.join()
self.output_queue.put(StopIteration)
def __iter__(self):
self.start()
while True:
item = self.output_queue.get()
if item is StopIteration:
break
k, v = item
yield self.key_map.pop(k), v
class MapWorkerThread(Thread):
"""
    Thread that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, func, input_queue=None, output_queue=None):
super().__init__()
self.func = func
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
item = self.input_queue.get()
if item is StopIteration:
self.output_queue.put(item)
break
k, v = item
self.output_queue.put((k, self.func(v)))
class ThreadMap(Thread):
def __init__(self, worker_type, iterator, n_thread, maxsize=2):
super().__init__()
self.iterator = iterator
self.n_thread = n_thread
self.work_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
self.output_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
self.workers = [worker_type(input_queue=in_q, output_queue=out_q) for (in_q, out_q) in zip(self.work_queues, self.output_queues)]
def start(self):
for worker in self.workers:
worker.start()
super().start()
def __iter__(self):
self.start()
for i in count():
item = self.output_queues[i % self.n_thread].get()
if item is StopIteration:
#do we need to empty output_queues in order to join worker threads?
for j in range(i + 1, i + self.n_thread):
self.output_queues[j % self.n_thread].get()
break
yield item
def run(self):
for i, (k, v) in enumerate(self.iterator):
self.work_queues[i % self.n_thread].put((k, v))
for q in self.work_queues:
q.put(StopIteration)
for worker in self.workers:
worker.join()
|
"""
Bonito train
"""
import os
import re
from glob import glob
from functools import partial
from time import perf_counter
from collections import OrderedDict
from datetime import datetime
from bonito.util import accuracy, decode_ref, permute, concat, match_names
import bonito
import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from torch.optim.lr_scheduler import LambdaLR
import torch.cuda.amp as amp
class ChunkDataSet:
def __init__(self, chunks, targets, lengths):
self.chunks = np.expand_dims(chunks, axis=1)
self.targets = targets
self.lengths = lengths
def __getitem__(self, i):
return (
self.chunks[i].astype(np.float32),
self.targets[i].astype(np.int64),
self.lengths[i].astype(np.int64),
)
def __len__(self):
return len(self.lengths)
def const_schedule(y):
"""
Constant Scheduler
"""
return lambda t: y
def linear_schedule(y0, y1):
"""
Linear Scheduler
"""
return lambda t: y0 + (y1 - y0) * t
def cosine_decay_schedule(y0, y1):
"""
Cosine Decay Scheduler
"""
return lambda t: y1 + 0.5 * (y0 - y1) * (np.cos(t * np.pi) + 1.0)
def piecewise_schedule(knots, funcs):
"""
Piecewise Scheduler
"""
def f(t):
i = np.searchsorted(knots, t)
t0 = 0.0 if i == 0 else knots[i - 1]
t1 = 1.0 if i == len(knots) else knots[i]
return funcs[i]((t - t0) / (t1 - t0))
return f
def func_scheduler(optimizer, func, total_steps, warmup_steps=None, warmup_ratio=0.1, start_step=0):
"""
Learning Rate Scheduler
"""
if warmup_steps:
y0 = func(0.0)
func = piecewise_schedule(
[warmup_steps / total_steps],
[linear_schedule(warmup_ratio * y0, y0), func]
)
return LambdaLR(optimizer, (lambda step: func((step + start_step) / total_steps)))
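# Usage sketch, not part of the upstream file: a linear warmup into a cosine
# decay from the base lr down to 10% of it, the same composition Trainer
# uses below.
def _example_scheduler():
    model = torch.nn.Linear(8, 8)
    optim = torch.optim.SGD(model.parameters(), lr=2e-3)
    sched = func_scheduler(
        optim, cosine_decay_schedule(1.0, 0.1), total_steps=1000, warmup_steps=100
    )
    for _ in range(1000):
        optim.step()
        sched.step()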
def load_state(dirname, device, model):
"""
Load a model state dict from disk
"""
model.to(device)
weight_no = None
weight_files = glob(os.path.join(dirname, "weights_*.tar"))
if weight_files:
weight_no = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
if weight_no:
print("[picking up from epoch %s]" % weight_no)
state_dict = torch.load(
os.path.join(dirname, 'weights_%s.tar' % weight_no), map_location=device
)
state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.', '')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
epoch = weight_no
else:
epoch = 0
return epoch
class Trainer:
def __init__(self, model, device, train_loader, valid_loader, criterion=None, use_amp=True):
self.model = model.to(device)
self.device = device
self.train_loader = train_loader
self.valid_loader = valid_loader
self.criterion = criterion or (model.seqdist.ctc_loss if hasattr(model, 'seqdist') else model.ctc_label_smoothing_loss)
self.use_amp = use_amp
self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
self.optimizer = None
def train_one_step(self, batch):
data, targets, lengths = batch
self.optimizer.zero_grad()
with amp.autocast(enabled=self.use_amp):
scores = self.model(data.to(self.device))
losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
if not isinstance(losses, dict):
losses = {'loss': losses}
self.scaler.scale(losses['loss']).backward()
self.scaler.unscale_(self.optimizer)
grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=2.0).item()
self.scaler.step(self.optimizer)
self.scaler.update()
return losses, grad_norm
def train_one_epoch(self, loss_log, lr_scheduler):
t0 = perf_counter()
chunks = 0
self.model.train()
progress_bar = tqdm(
total=len(self.train_loader), desc='[0/{}]'.format(len(self.train_loader.dataset)),
ascii=True, leave=True, ncols=100, bar_format='{l_bar}{bar}| [{elapsed}{postfix}]'
)
smoothed_loss = None
with progress_bar:
for batch in self.train_loader:
chunks += batch[0].shape[0]
losses, grad_norm = self.train_one_step(batch)
losses = {k: v.item() for k,v in losses.items()}
if lr_scheduler is not None: lr_scheduler.step()
smoothed_loss = losses['loss'] if smoothed_loss is None else (0.01 * losses['loss'] + 0.99 * smoothed_loss)
progress_bar.set_postfix(loss='%.4f' % smoothed_loss)
progress_bar.set_description("[{}/{}]".format(chunks, len(self.train_loader.dataset)))
progress_bar.update()
if loss_log is not None:
loss_log.append({'chunks': chunks, 'time': perf_counter() - t0, 'grad_norm': grad_norm, **losses})
return smoothed_loss, perf_counter() - t0
def validate_one_step(self, batch):
data, targets, lengths = batch
scores = self.model(data.to(self.device))
losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
losses = {k: v.item() for k, v in losses.items()} if isinstance(losses, dict) else losses.item()
if hasattr(self.model, 'decode_batch'):
seqs = self.model.decode_batch(scores)
else:
seqs = [self.model.decode(x) for x in permute(scores, 'TNC', 'NTC')]
refs = [decode_ref(target, self.model.alphabet) for target in targets]
accs = [
accuracy(ref, seq, min_coverage=0.5) if len(seq) else 0. for ref, seq in zip(refs, seqs)
]
return seqs, refs, accs, losses
def validate_one_epoch(self):
self.model.eval()
with torch.no_grad():
seqs, refs, accs, losses = zip(*(self.validate_one_step(batch) for batch in self.valid_loader))
seqs, refs, accs = (sum(x, []) for x in (seqs, refs, accs))
loss = np.mean([(x['ctc_loss'] if isinstance(x, dict) else x) for x in losses])
return loss, np.mean(accs), np.median(accs)
def init_optimizer(self, lr, **kwargs):
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, **kwargs)
def get_lr_scheduler(self, epochs, last_epoch=0):
return func_scheduler(
self.optimizer, cosine_decay_schedule(1.0, 0.1), epochs * len(self.train_loader),
warmup_steps=500,
start_step=last_epoch*len(self.train_loader)
)
def fit(self, workdir, epochs=1, lr=2e-3, last_epoch=0):
if self.optimizer is None:
self.init_optimizer(lr)
lr_scheduler = self.get_lr_scheduler(epochs, last_epoch=last_epoch)
for epoch in range(1 + last_epoch, epochs + 1 + last_epoch):
try:
with bonito.io.CSVLogger(os.path.join(workdir, 'losses_{}.csv'.format(epoch))) as loss_log:
train_loss, duration = self.train_one_epoch(loss_log, lr_scheduler)
model_state = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict()
torch.save(model_state, os.path.join(workdir, "weights_%s.tar" % epoch))
val_loss, val_mean, val_median = self.validate_one_epoch()
except KeyboardInterrupt:
break
print("[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%".format(
epoch, workdir, val_loss, val_mean, val_median
))
with bonito.io.CSVLogger(os.path.join(workdir, 'training.csv')) as training_log:
training_log.append({
'time': datetime.today(),
'duration': int(duration),
'epoch': epoch,
'train_loss': train_loss,
'validation_loss': val_loss,
'validation_mean': val_mean,
'validation_median': val_median
                })
|
"""
Bonito Download
"""
import os
import re
from shutil import rmtree
from zipfile import ZipFile
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.util import __data__, __models__
from bonito.cli.convert import main as convert
from bonito.cli.convert import argparser as cargparser
import requests
from tqdm import tqdm
class File:
"""
Small class for downloading models and training assets.
"""
__url__ = "https://nanoporetech.box.com/shared/static/"
def __init__(self, path, url_frag, force):
self.path = path
self.force = force
self.url = os.path.join(self.__url__, url_frag)
def location(self, filename):
return os.path.join(self.path, filename)
def exists(self, filename):
return os.path.exists(self.location(filename))
def download(self):
"""
Download the remote file
"""
# create the requests for the file
req = requests.get(self.url, stream=True)
total = int(req.headers.get('content-length', 0))
fname = re.findall('filename="([^"]+)', req.headers['content-disposition'])[0]
# skip download if local file is found
        # note: str.strip('.zip') strips characters, not a suffix; use splitext
        if self.exists(os.path.splitext(fname)[0]) and not self.force:
            print("[skipping %s]" % fname)
            return
        if self.exists(os.path.splitext(fname)[0]) and self.force:
            rmtree(self.location(os.path.splitext(fname)[0]))
# download the file
with tqdm(total=total, unit='iB', ascii=True, ncols=100, unit_scale=True, leave=False) as t:
with open(self.location(fname), 'wb') as f:
for data in req.iter_content(1024):
f.write(data)
t.update(len(data))
print("[downloaded %s]" % fname)
# unzip .zip files
if fname.endswith('.zip'):
with ZipFile(self.location(fname), 'r') as zfile:
zfile.extractall(self.path)
os.remove(self.location(fname))
# convert chunkify training files to bonito
if fname.endswith('.hdf5'):
print("[converting %s]" % fname)
args = cargparser().parse_args([
self.location(fname),
                os.path.splitext(self.location(fname))[0]  # drop the .hdf5 extension
])
convert(args)
r9_models = [
"n8c07gc9ro09zt0ivgcoeuz6krnwsnf6.zip", # dna_r9.4.1@v1
"nas0uhf46fd1lh2jndhx2a54a9vvhxp4.zip", # dna_r9.4.1@v2
"1wodp3ur4jhvqvu5leowfg6lrw54jxp2.zip", # dna_r9.4.1@v3
"uetgwsnb8yfqvuyoka8p09mxilgskqc7.zip", # [email protected]
"47t2y48zw4waly25lmzx6sagf4bbbqqz.zip", # [email protected]
"hrv649cvx8lvomu1u0tsd47e5u2bbabt.zip", # [email protected]
"arqi4qwcj9btsd6bbjsnlbai0s6dg8yd.zip",
]
r10_models = [
"e70s615lh3i24rkhz006i0e4u4m8y2xa.zip", # dna_r10.3_q20ea
"hnr5mwlm8vmdsfpvn5fsxn3mvhbucy5f.zip", # dna_r10.3@v3
"yesf11tisfrncmod5hj2xtx9kbdveuqt.zip", # [email protected]
"ci6xdu7d4wczmhorhw1sweyg4gczx97t.zip", # [email protected]
"4cunv5z7nwjag7v2bun0g7vk2lf8rqnc.zip",
]
training = [
"cmh91cxupa0are1kc3z9aok425m75vrb.hdf5",
]
def main(args):
"""
Download models and training sets
"""
if args.models or args.all:
print("[downloading models]")
for model in r9_models[-1 if args.latest else 0:]:
File(__models__, model, args.force).download()
for model in r10_models[-1 if args.latest else 0:]:
File(__models__, model, args.force).download()
if args.training or args.all:
print("[downloading training data]")
for train in training:
File(__data__, train, args.force).download()
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
group = parser.add_mutually_exclusive_group()
group.add_argument('--all', action='store_true')
group.add_argument('--models', action='store_true')
group.add_argument('--training', action='store_true')
parser.add_argument('-f', '--force', action='store_true')
parser.add_argument('--latest', action='store_true')
return parser
|
#!/usr/bin/env python
"""
Convert a Taiyaki chunkify training file to set of Bonito CTC .npy files
"""
import os
import h5py
import random
import numpy as np
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import islice as take
from argparse import ArgumentDefaultsHelpFormatter
from tqdm import tqdm
from bonito.training import ChunkDataSet
def align(samples, pointers, reference):
""" align to the start of the mapping """
squiggle_duration = len(samples)
mapped_off_the_start = len(pointers[pointers < 0])
mapped_off_the_end = len(pointers[pointers >= squiggle_duration])
pointers = pointers[mapped_off_the_start:len(pointers) - mapped_off_the_end]
reference = reference[mapped_off_the_start:len(reference) - mapped_off_the_end]
return samples[pointers[0]:pointers[-1]], pointers - pointers[0], reference
def scale(read, normalise=True):
""" scale and normalise a read """
samples = read['Dacs'][:]
scaling = read.attrs['range'] / read.attrs['digitisation']
scaled = (scaling * (samples + read.attrs['offset'])).astype(np.float32)
if normalise:
return (scaled - read.attrs['shift_frompA']) / read.attrs['scale_frompA']
return scaled
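# Note on scale() above: 'Dacs' are raw ADC counts; range/digitisation converts
# counts to picoamps, and shift_frompA/scale_frompA are Taiyaki's per-read
# normalisation parameters, giving a roughly zero-mean, unit-variance signal.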
def pad_lengths(ragged_array, max_len=None):
lengths = np.array([len(x) for x in ragged_array], dtype=np.uint16)
padded = np.zeros((len(ragged_array), max_len or np.max(lengths)), dtype=ragged_array[0].dtype)
for x, y in zip(ragged_array, padded):
y[:len(x)] = x
return padded, lengths
def regular_break_points(n, chunk_len, overlap=0, align='mid'):
num_chunks, remainder = divmod(n - overlap, chunk_len - overlap)
start = {'left': 0, 'mid': remainder // 2, 'right': remainder}[align]
starts = np.arange(start, start + num_chunks*(chunk_len - overlap), (chunk_len - overlap))
return np.vstack([starts, starts + chunk_len]).T
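# Worked example for regular_break_points: n=10, chunk_len=4 gives
# divmod(10, 4) = (2 chunks, remainder 2); with align='mid' the first chunk
# starts at 1, so the break points are [[1, 5], [5, 9]].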
def get_chunks(read, break_points):
sample = scale(read)
pointers = read['Ref_to_signal'][:]
target = read['Reference'][:] + 1 # CTC convention
return (
(sample[i:j], target[ti:tj]) for (i, j), (ti, tj)
in zip(break_points, np.searchsorted(pointers, break_points))
)
def chunk_dataset(reads, chunk_len, num_chunks=None):
all_chunks = (
(chunk, target) for read in reads for chunk, target in
get_chunks(reads[read], regular_break_points(len(reads[read]['Dacs']), chunk_len))
)
chunks, targets = zip(*tqdm(take(all_chunks, num_chunks), total=num_chunks))
    targets, target_lens = pad_lengths(targets) # convert refs from ragged array
return ChunkDataSet(chunks, targets, target_lens)
def validation_split(reads, num_valid=1000):
reads = np.random.permutation(sorted(reads.items()))
return OrderedDict(reads[:-num_valid]), OrderedDict(reads[-num_valid:])
def typical_indices(x, n=2.5):
mu, sd = np.mean(x), np.std(x)
idx, = np.where((mu - n*sd < x) & (x < mu + n*sd))
return idx
def filter_chunks(ds, idx):
filtered = ChunkDataSet(ds.chunks.squeeze(1)[idx], ds.targets[idx], ds.lengths[idx])
filtered.targets = filtered.targets[:, :filtered.lengths.max()]
return filtered
def save_chunks(chunks, output_directory):
os.makedirs(output_directory, exist_ok=True)
np.save(os.path.join(output_directory, "chunks.npy"), chunks.chunks.squeeze(1))
np.save(os.path.join(output_directory, "references.npy"), chunks.targets)
np.save(os.path.join(output_directory, "reference_lengths.npy"), chunks.lengths)
print()
print("> data written to %s:" % output_directory)
print(" - chunks.npy with shape", chunks.chunks.squeeze(1).shape)
print(" - references.npy with shape", chunks.targets.shape)
print(" - reference_lengths.npy shape", chunks.lengths.shape)
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
reads = h5py.File(args.chunkify_file, 'r')['Reads']
training, validation = validation_split(reads, args.validation_reads)
print("> preparing training chunks\n")
training_chunks = chunk_dataset(training, args.chunksize)
training_indices = typical_indices(training_chunks.lengths)
training_chunks = filter_chunks(training_chunks, np.random.permutation(training_indices))
save_chunks(training_chunks, args.output_directory)
print("\n> preparing validation chunks\n")
validation_chunks = chunk_dataset(validation, args.chunksize)
validation_indices = typical_indices(validation_chunks.lengths)
validation_chunks = filter_chunks(validation_chunks, validation_indices)
save_chunks(validation_chunks, os.path.join(args.output_directory, "validation"))
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("chunkify_file")
parser.add_argument("output_directory")
parser.add_argument("--seed", default=25, type=int)
parser.add_argument("--chunksize", default=3600, type=int)
parser.add_argument("--validation-reads", default=1000, type=int)
return parser
|
"""
Bonito Export
"""
import os
import re
import sys
import json
import torch
import bonito
import hashlib
import numpy as np
from glob import glob
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, torch.nn.Parameter):
return obj.data
elif isinstance(obj, torch.Tensor):
return obj.detach().numpy()
else:
return super(JsonEncoder, self).default(obj)
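# Usage sketch (an assumption, not part of the original module): JsonEncoder
# lets numpy/torch values pass through json.dumps, e.g.
#   json.dumps({'lr': np.float32(2e-3), 'idx': np.arange(3)}, cls=JsonEncoder)
# returns '{"lr": 0.002, "idx": [0, 1, 2]}'.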
def file_md5(filename, nblock=1024):
"""
Get md5 string from file.
"""
hasher = hashlib.md5()
block_size = nblock * hasher.block_size
with open(filename, "rb") as fh:
for blk in iter((lambda: fh.read(block_size)), b""):
hasher.update(blk)
return hasher.hexdigest()
def reformat_output_layer(layer_dict):
n_base, state_len, blank_score = [layer_dict.pop(k) for k in ['n_base', 'state_len', 'blank_score']]
layer_dict['size'] = (n_base + 1) * n_base**state_len
layer_dict['type'] = 'GlobalNormTransducer'
if blank_score is not None:
assert layer_dict['activation'] == 'tanh'
params = layer_dict['params']
params['W'] = torch.nn.functional.pad(
params['W'].reshape([n_base**state_len, n_base, -1]),
(0, 0, 1, 0),
value=0.
).reshape((n_base + 1) * n_base**state_len, -1)
params['b'] = torch.nn.functional.pad(
params['b'].reshape(n_base**state_len, n_base),
(1, 0),
value=np.arctanh(blank_score / layer_dict['scale'])
).reshape(-1)
return layer_dict
def to_guppy_dict(model, include_weights=True):
guppy_dict = bonito.nn.to_dict(model.encoder, include_weights=include_weights)
guppy_dict['sublayers'] = [x for x in guppy_dict['sublayers'] if x['type'] != 'permute']
guppy_dict['sublayers'] = [dict(x, type='LSTM', activation='tanh', gate='sigmoid') if x['type'] == 'lstm' else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'] = [dict(x, padding=(x['padding'], x['padding'])) if x['type'] == 'convolution' else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'] = [{'type': 'reverse', 'sublayers': x} if x.pop('reverse', False) else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'][-1] = reformat_output_layer(guppy_dict['sublayers'][-1])
return guppy_dict
def main(args):
if not os.path.isdir(args.model):
print("[error] file given - please provide a model directory to export.", file=sys.stderr)
return 1
model = bonito.util.load_model(args.model, device='cpu')
jsn = to_guppy_dict(model)
weight_files = glob(os.path.join(args.model, "weights_*.tar"))
weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
jsn["md5sum"] = file_md5(os.path.join(args.model, 'weights_%s.tar' % weights))
json.dump(jsn, sys.stdout, cls=JsonEncoder)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument('model')
return parser
|
"""
Bonito model viewer - display a model architecture for a given config.
"""
import toml
import argparse
from bonito.util import load_symbol
def main(args):
config = toml.load(args.config)
Model = load_symbol(config, "Model")
model = Model(config)
print(model)
print("Total parameters in model", sum(p.numel() for p in model.parameters()))
def argparser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("config")
return parser
|
"""
Bonito Basecaller
"""
import sys
import torch
import numpy as np
from tqdm import tqdm
from time import perf_counter
from datetime import timedelta
from itertools import islice as take
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.aligner import Aligner
from bonito.io import CTCWriter, Writer
from bonito.fast5 import get_reads, read_chunks
from bonito.multiprocessing import process_cancel
from bonito.util import column_to_set, load_symbol, load_model
def main(args):
if args.save_ctc and not args.reference:
sys.stderr.write("> a reference is needed to output ctc training data\n")
exit(1)
sys.stderr.write("> loading model\n")
model = load_model(args.model_directory, args.device, weights=int(args.weights))
if args.reference:
sys.stderr.write("> loading reference\n")
aligner = Aligner(args.reference, preset='ont-map', best_n=1)
if not aligner:
sys.stderr.write("> failed to load/build index\n")
exit(1)
else:
aligner = None
reads = get_reads(
args.reads_directory, n_proc=8, recursive=args.recursive,
read_ids=column_to_set(args.read_ids), skip=args.skip,
cancel=process_cancel()
)
if args.max_reads:
reads = take(reads, args.max_reads)
basecall = load_symbol(args.model_directory, "basecall")
if args.save_ctc:
reads = (
chunk for read in reads for chunk in read_chunks(read, chunksize=args.chunksize)
)
basecalls = basecall(
model, reads, batchsize=64, chunksize=args.chunksize,
aligner=aligner, qscores=args.fastq, reverse=args.revcomp,
)
writer = CTCWriter(
tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
aligner, args.ctc_min_coverage, args.ctc_min_accuracy
)
else:
basecalls = basecall(
model, reads, aligner=aligner, reverse=args.revcomp,
qscores=args.fastq, batchsize=args.batchsize, chunksize=args.chunksize,
)
writer = Writer(
tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
aligner, fastq=args.fastq
)
t0 = perf_counter()
writer.start()
writer.join()
duration = perf_counter() - t0
num_samples = sum(num_samples for read_id, num_samples in writer.log)
sys.stderr.write("> completed reads: %s\n" % len(writer.log))
sys.stderr.write("> duration: %s\n" % timedelta(seconds=np.round(duration)))
sys.stderr.write("> samples per second %.1E\n" % (num_samples / duration))
sys.stderr.write("> done\n")
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model_directory")
parser.add_argument("reads_directory")
parser.add_argument("--reference")
parser.add_argument("--read-ids")
parser.add_argument("--device", default="cuda")
parser.add_argument("--weights", default="0", type=str)
parser.add_argument("--skip", action="store_true", default=False)
parser.add_argument("--fastq", action="store_true", default=False)
parser.add_argument("--save-ctc", action="store_true", default=False)
parser.add_argument("--revcomp", action="store_true", default=False)
parser.add_argument("--recursive", action="store_true", default=False)
parser.add_argument("--ctc-min-coverage", default=0.9, type=float)
parser.add_argument("--ctc-min-accuracy", default=0.9, type=float)
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--chunksize", default=4000, type=int)
parser.add_argument("--max-reads", default=0, type=int)
return parser
|
"""
Bonito Duplex consensus decoding.
https://www.biorxiv.org/content/10.1101/2020.02.25.956771v1
"""
import os
import sys
import json
from glob import glob
from pathlib import Path
from os.path import basename
from functools import partial
from time import perf_counter
from datetime import timedelta
from multiprocessing import Pool
from itertools import islice, groupby
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process, Queue, Lock, cpu_count
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import spoa
import torch
import parasail
import numpy as np
import pandas as pd
from tqdm import tqdm
from fast_ctc_decode import crf_beam_search, crf_beam_search_duplex
from genomeworks import cuda
from genomeworks.cudapoa import CudaPoaBatch, status_to_str
import bonito
from bonito.io import Writer, devnull
from bonito.aligner import Aligner, align_map
from bonito.util import load_model, half_supported
from bonito.crf.basecall import transfer, split_read, stitch
from bonito.fast5 import get_raw_data_for_read, get_fast5_file
from bonito.util import unbatchify, batchify, chunk, concat, accuracy
from bonito.multiprocessing import thread_map, process_map, process_cancel
def poagen(groups, gpu_percent=0.8):
free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
gpu_mem_per_batch = gpu_percent * free
max_seq_sz = 0
max_sequences_per_poa = 0
for group in groups:
longest_seq = len(max(group, key=len))
max_seq_sz = longest_seq if longest_seq > max_seq_sz else max_seq_sz
seq_in_poa = len(group)
max_sequences_per_poa = seq_in_poa if seq_in_poa > max_sequences_per_poa else max_sequences_per_poa
batch = CudaPoaBatch(
max_sequences_per_poa,
max_seq_sz,
gpu_mem_per_batch,
output_type="consensus",
cuda_banded_alignment=True,
alignment_band_width=256,
)
poa_index = 0
initial_count = 0
while poa_index < len(groups):
group = groups[poa_index]
group_status, seq_status = batch.add_poa_group(group)
# If group was added and more space is left in batch, continue onto next group.
if group_status == 0:
for seq_index, status in enumerate(seq_status):
if status != 0:
print("Could not add sequence {} to POA {} - error {}".format(seq_index, poa_index, status_to_str(status)), file=sys.stderr)
poa_index += 1
# Once batch is full or no groups are left, run POA processing.
if ((group_status == 1) or ((group_status == 0) and (poa_index == len(groups)))):
batch.generate_poa()
consensus, coverage, con_status = batch.get_consensus()
for p, status in enumerate(con_status):
if status != 0:
print("Could not get consensus for POA group {} - {}".format(initial_count + p, status_to_str(status)), file=sys.stderr)
yield from consensus
initial_count = poa_index
batch.reset()
# In the case where POA group wasn't processed correctly.
elif group_status != 0:
print("Could not add POA group {} to batch - {}".format(poa_index, status_to_str(group_status)), file=sys.stderr)
poa_index += 1
def get_read(readdir, summary, idx):
"""
Get a single read from row `idx` in the `summary` dataframe.
"""
return get_raw_data_for_read(
(readdir / summary.iloc[idx].filename_fast5, summary.iloc[idx].read_id)
)
def read_gen(directory, summary, n_proc=1, cancel=None):
"""
Generate reads from the given `directory` listed in the `summary` dataframe.
"""
with Pool(n_proc) as pool:
for read in pool.imap(partial(get_read, Path(directory), summary), range(len(summary))):
yield read
if cancel is not None and cancel.is_set():
return
def get_read_ids(filename):
"""
Return a dictionary of read_id -> filename mappings.
"""
with get_fast5_file(filename, 'r') as f5:
return {
read.read_id: basename(filename) for read in f5.get_reads()
}
def build_index(files, n_proc=1):
"""
Build an index of read ids to filename mappings
"""
index = {}
with ProcessPoolExecutor(max_workers=n_proc) as pool:
for res in tqdm(pool.map(get_read_ids, files), leave=False):
index.update(res)
return index
def build_envelope(len1, seq1, path1, len2, seq2, path2, padding=15):
# needleman-wunsch alignment with constant gap penalty.
aln = parasail.nw_trace_striped_32(seq2, seq1, 2, 2, parasail.dnafull)
# pair up positions
alignment = np.column_stack([
np.cumsum([x != '-' for x in aln.traceback.ref]) - 1,
np.cumsum([x != '-' for x in aln.traceback.query]) - 1
])
path_range1 = np.column_stack([path1, path1[1:] + [len1]])
path_range2 = np.column_stack([path2, path2[1:] + [len2]])
envelope = np.full((len1, 2), -1, dtype=int)
for idx1, idx2 in alignment.clip(0):
st_1, en_1 = path_range1[idx1]
st_2, en_2 = path_range2[idx2]
for idx in range(st_1, en_1):
if st_2 < envelope[idx, 0] or envelope[idx, 0] < 0:
envelope[idx, 0] = st_2
if en_2 > envelope[idx, 1] or envelope[idx, 1] < 0:
envelope[idx, 1] = en_2
# add a little padding to ensure some overlap
envelope[:, 0] = envelope[:, 0] - padding
envelope[:, 1] = envelope[:, 1] + padding
envelope = np.clip(envelope, 0, len2)
prev_end = 0
for i in range(envelope.shape[0]):
if envelope[i, 0] > envelope[i, 1]:
envelope[i, 0] = 0
if envelope[i, 0] > prev_end:
envelope[i, 0] = prev_end
prev_end = envelope[i, 1]
return envelope.astype(np.uint64)
def find_follow_on(df, gap=5, distance=51, cov=0.85, min_len=100):
"""
Find follow on reads from a sequencing summary file.
"""
df = df[
df.alignment_coverage.astype('float32').gt(cov) &
df.sequence_length_template.astype('int32').gt(min_len)
]
df = df.sort_values(['run_id', 'channel', 'mux', 'start_time'])
genome_start = np.array(df.alignment_genome_start, dtype=np.int32)
genome_end = np.array(df.alignment_genome_end, dtype=np.int32)
direction = np.array(df.alignment_direction)
start_time = np.array(df.start_time, dtype=np.float32)
end_time = np.array(df.start_time + df.duration, dtype=np.float32)
channel = np.array(df.channel, dtype=np.int32)
mux = np.array(df.mux, dtype=np.int32)
filt = (
(channel[1:] == channel[:-1]) &
(mux[1:] == mux[:-1]) &
(np.abs(genome_start[1:] - genome_start[:-1]) < distance) &
(np.abs(genome_end[1:] - genome_end[:-1]) < distance) &
(direction[1:] != direction[:-1]) &
(start_time[1:] - end_time[:-1] < gap)
)
mask = np.full(len(filt) + 1, False)
mask[:-1] = mask[:-1] | filt
mask[1:] = mask[1:] | filt
return df[mask]
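# Note on find_follow_on above: `filt` marks adjacent rows on the same channel
# and mux that map close together on opposite strands with a small time gap;
# the final mask widens each hit to cover both reads of the candidate pair.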
def compute_scores(model, batch, reverse=False):
with torch.no_grad():
device = next(model.parameters()).device
dtype = torch.float16 if half_supported() else torch.float32
scores = model.encoder(batch.to(dtype).to(device))
if reverse: scores = model.seqdist.reverse_complement(scores)
betas = model.seqdist.backward_scores(scores.to(torch.float32))
trans, init = model.seqdist.compute_transition_probs(scores, betas)
return {
'trans': trans.to(dtype).transpose(0, 1),
'init': init.to(dtype).unsqueeze(1),
}
def basecall(model, reads, chunksize=4000, overlap=500, batchsize=32, reverse=False):
reads = (
read_chunk for read in reads
for read_chunk in split_read(read, chunksize * batchsize)[::-1 if reverse else 1]
)
chunks = (
((read, start, end),
chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
for (read, start, end) in reads
)
batches = (
(k, compute_scores(model, batch, reverse=reverse))
for k, batch in batchify(chunks, batchsize=batchsize)
)
stitched = (
(read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
for ((read, start, end), x) in unbatchify(batches)
)
transferred = thread_map(transfer, stitched, n_thread=1)
return (
(read, concat([part for k, part in parts]))
for read, parts in groupby(transferred, lambda x: x[0])
)
def beam_search_duplex(seq1, path1, t1, b1, seq2, path2, t2, b2, alphabet='NACGT', beamsize=5, pad=40, T=0.01):
env = build_envelope(t1.shape[0], seq1, path1, t2.shape[0], seq2, path2, padding=pad)
return crf_beam_search_duplex(
t1, b1, t2, b2,
alphabet=alphabet,
beam_size=beamsize,
beam_cut_threshold=T,
envelope=env,
)
def decode(res, beamsize_1=5, pad_1=40, cut_1=0.01, beamsize_2=5, pad_2=40, cut_2=0.01, match=80, alphabet="NACGT"):
temp_probs, init1 = res[0]['trans'].astype(np.float32), res[0]['init'][0].astype(np.float32)
comp_probs, init2 = res[1]['trans'].astype(np.float32), res[1]['init'][0].astype(np.float32)
    simplex1, path1 = crf_beam_search(temp_probs, init1, alphabet, beam_size=beamsize_1, beam_cut_threshold=cut_1)
    simplex2, path2 = crf_beam_search(comp_probs, init2, alphabet, beam_size=beamsize_2, beam_cut_threshold=cut_2)
if len(simplex1) < 10 or len(simplex2) < 10:
return [simplex1, simplex2]
if accuracy(simplex1, simplex2) < match:
return [simplex1, simplex2]
    duplex1 = beam_search_duplex(
        simplex1, path1, temp_probs, init1, simplex2, path2, comp_probs, init2, pad=pad_1, beamsize=beamsize_1, T=cut_1
    )
    duplex2 = beam_search_duplex(
        simplex2, path2, comp_probs, init2, simplex1, path1, temp_probs, init1, pad=pad_2, beamsize=beamsize_2, T=cut_2
    )
return [duplex1, duplex2, simplex1, simplex2]
def poa(seqs, allseq=False):
con, msa = spoa.poa(seqs, genmsa=False)
if allseq: return (con, *seqs)
return (con, )
def call(model, reads_directory, templates, complements, aligner=None, cudapoa=True):
temp_reads = read_gen(reads_directory, templates, n_proc=8, cancel=process_cancel())
comp_reads = read_gen(reads_directory, complements, n_proc=8, cancel=process_cancel())
temp_scores = basecall(model, temp_reads, reverse=False)
comp_scores = basecall(model, comp_reads, reverse=True)
scores = (((r1, r2), (s1, s2)) for (r1, s1), (r2, s2) in zip(temp_scores, comp_scores))
calls = thread_map(decode, scores, n_thread=12)
if cudapoa:
sequences = ((reads, [seqs, ]) for reads, seqs in calls if len(seqs) > 2)
consensus = (zip(reads, poagen(calls)) for reads, calls in batchify(sequences, 100))
res = ((reads[0], {'sequence': seq}) for seqs in consensus for reads, seq in seqs)
else:
sequences = ((reads, seqs) for reads, seqs in calls if len(seqs) > 2)
consensus = process_map(poa, sequences, n_proc=4)
res = ((reads, {'sequence': seq}) for reads, seqs in consensus for seq in seqs)
if aligner is None: return res
return align_map(aligner, res)
def main(args):
sys.stderr.write("> loading model\n")
model = load_model(args.model, args.device)
if args.reference:
sys.stderr.write("> loading reference\n")
aligner = Aligner(args.reference, preset='ont-map')
if not aligner:
sys.stderr.write("> failed to load/build index\n")
exit(1)
else:
aligner = None
if args.summary:
sys.stderr.write("> finding follow on strands\n")
        pairs = pd.read_csv(args.summary, sep='\t', low_memory=False)
pairs = pairs[pairs.sequence_length_template.gt(0)]
if 'filename' in pairs.columns:
pairs = pairs.rename(columns={'filename': 'filename_fast5'})
if 'alignment_strand_coverage' in pairs.columns:
pairs = pairs.rename(columns={'alignment_strand_coverage': 'alignment_coverage'})
valid_fast5s = [
f for f in pairs.filename_fast5.unique()
if ((args.reads_directory / Path(f)).exists())
]
pairs = pairs[pairs.filename_fast5.isin(valid_fast5s)]
pairs = find_follow_on(pairs)
sys.stderr.write("> found %s follow strands in summary\n" % (len(pairs) // 2))
if args.max_reads > 0: pairs = pairs.head(args.max_reads)
temp_reads = pairs.iloc[0::2]
comp_reads = pairs.iloc[1::2]
else:
if args.index is not None:
sys.stderr.write("> loading read index\n")
index = json.load(open(args.index, 'r'))
else:
sys.stderr.write("> building read index\n")
files = list(glob(os.path.join(args.reads_directory, '*.fast5')))
index = build_index(files, n_proc=8)
if args.save_index:
with open('bonito-read-id.idx', 'w') as f:
json.dump(index, f)
pairs = pd.read_csv(args.pairs, sep=args.sep, names=['read_1', 'read_2'])
if args.max_reads > 0: pairs = pairs.head(args.max_reads)
pairs['file_1'] = pairs['read_1'].apply(index.get)
pairs['file_2'] = pairs['read_2'].apply(index.get)
pairs = pairs.dropna().reset_index()
temp_reads = pairs[['read_1', 'file_1']].rename(
columns={'read_1': 'read_id', 'file_1': 'filename_fast5'}
)
comp_reads = pairs[['read_2', 'file_2']].rename(
columns={'read_2': 'read_id', 'file_2': 'filename_fast5'}
)
if len(pairs) == 0:
print("> no matched pairs found in given directory", file=sys.stderr)
exit(1)
# https://github.com/clara-parabricks/GenomeWorks/issues/648
with devnull(): CudaPoaBatch(1000, 1000, 3724032)
basecalls = call(model, args.reads_directory, temp_reads, comp_reads, aligner=aligner)
writer = Writer(tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, duplex=True)
t0 = perf_counter()
writer.start()
writer.join()
duration = perf_counter() - t0
num_samples = sum(num_samples for read_id, num_samples in writer.log)
print("> duration: %s" % timedelta(seconds=np.round(duration)), file=sys.stderr)
print("> samples per second %.1E" % (num_samples / duration), file=sys.stderr)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model")
parser.add_argument("reads_directory")
group = parser.add_mutually_exclusive_group()
group.add_argument("--summary", default=None)
group.add_argument("--pairs", default=None)
parser.add_argument("--sep", default=' ')
parser.add_argument("--index", default=None)
parser.add_argument("--save-index", action="store_true", default=False)
parser.add_argument("--reference")
parser.add_argument("--device", default="cuda")
parser.add_argument("--max-reads", default=0, type=int)
return parser
|
#!/usr/bin/env python3
"""
Bonito training.
"""
import os
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
from bonito.util import __models__, default_config, default_data
from bonito.util import load_data, load_model, load_symbol, init, half_supported
from bonito.training import ChunkDataSet, load_state, Trainer
import toml
import torch
import numpy as np
from torch.optim import AdamW
from torch.utils.data import DataLoader
def main(args):
workdir = os.path.expanduser(args.training_directory)
if os.path.exists(workdir) and not args.force:
print("[error] %s exists, use -f to force continue training." % workdir)
exit(1)
init(args.seed, args.device)
device = torch.device(args.device)
print("[loading data]")
train_data = load_data(limit=args.chunks, directory=args.directory)
if os.path.exists(os.path.join(args.directory, 'validation')):
valid_data = load_data(directory=os.path.join(args.directory, 'validation'))
else:
print("[validation set not found: splitting training set]")
split = np.floor(len(train_data[0]) * 0.97).astype(np.int32)
valid_data = [x[split:] for x in train_data]
train_data = [x[:split] for x in train_data]
train_loader = DataLoader(ChunkDataSet(*train_data), batch_size=args.batch, shuffle=True, num_workers=4, pin_memory=True)
valid_loader = DataLoader(ChunkDataSet(*valid_data), batch_size=args.batch, num_workers=4, pin_memory=True)
if args.pretrained:
dirname = args.pretrained
if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
dirname = os.path.join(__models__, dirname)
config_file = os.path.join(dirname, 'config.toml')
else:
config_file = args.config
config = toml.load(config_file)
argsdict = dict(training=vars(args))
os.makedirs(workdir, exist_ok=True)
toml.dump({**config, **argsdict}, open(os.path.join(workdir, 'config.toml'), 'w'))
print("[loading model]")
if args.pretrained:
print("[using pretrained model {}]".format(args.pretrained))
model = load_model(args.pretrained, device, half=False)
else:
model = load_symbol(config, 'Model')(config)
last_epoch = load_state(workdir, args.device, model)
if args.multi_gpu:
from torch.nn import DataParallel
model = DataParallel(model)
model.decode = model.module.decode
model.alphabet = model.module.alphabet
trainer = Trainer(model, device, train_loader, valid_loader, use_amp=half_supported() and not args.no_amp)
trainer.fit(workdir, args.epochs, args.lr, last_epoch=last_epoch)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("training_directory")
group = parser.add_mutually_exclusive_group()
group.add_argument('--config', default=default_config)
group.add_argument('--pretrained', default="")
parser.add_argument("--directory", default=default_data)
parser.add_argument("--device", default="cuda")
parser.add_argument("--lr", default=2e-3, type=float)
parser.add_argument("--seed", default=25, type=int)
parser.add_argument("--epochs", default=5, type=int)
parser.add_argument("--batch", default=64, type=int)
parser.add_argument("--chunks", default=0, type=int)
parser.add_argument("--no-amp", action="store_true", default=False)
parser.add_argument("--multi-gpu", action="store_true", default=False)
parser.add_argument("-f", "--force", action="store_true", default=False)
return parser
|
"""
Bonito model evaluator
"""
import os
import time
import torch
import numpy as np
from itertools import starmap
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.training import ChunkDataSet
from bonito.util import accuracy, poa, decode_ref, half_supported
from bonito.util import init, load_data, load_model, concat, permute
from torch.utils.data import DataLoader
def main(args):
poas = []
init(args.seed, args.device)
print("* loading data")
directory = args.directory
if os.path.exists(os.path.join(directory, 'validation')):
directory = os.path.join(directory, 'validation')
testdata = ChunkDataSet(
*load_data(
limit=args.chunks, directory=directory
)
)
dataloader = DataLoader(testdata, batch_size=args.batchsize)
accuracy_with_cov = lambda ref, seq: accuracy(ref, seq, min_coverage=args.min_coverage)
for w in [int(i) for i in args.weights.split(',')]:
seqs = []
print("* loading model", w)
model = load_model(args.model_directory, args.device, weights=w)
print("* calling")
t0 = time.perf_counter()
with torch.no_grad():
for data, *_ in dataloader:
if half_supported():
data = data.type(torch.float16).to(args.device)
else:
data = data.to(args.device)
log_probs = model(data)
if hasattr(model, 'decode_batch'):
seqs.extend(model.decode_batch(log_probs))
else:
seqs.extend([model.decode(p) for p in permute(log_probs, 'TNC', 'NTC')])
duration = time.perf_counter() - t0
refs = [decode_ref(target, model.alphabet) for target in dataloader.dataset.targets]
accuracies = [accuracy_with_cov(ref, seq) if len(seq) else 0. for ref, seq in zip(refs, seqs)]
        if args.poa: poas.append(seqs)  # `sequences` was undefined; collect this model's calls
print("* mean %.2f%%" % np.mean(accuracies))
print("* median %.2f%%" % np.median(accuracies))
print("* time %.2f" % duration)
print("* samples/s %.2E" % (args.chunks * data.shape[2] / duration))
if args.poa:
print("* doing poa")
t0 = time.perf_counter()
# group each sequence prediction per model together
poas = [list(seq) for seq in zip(*poas)]
consensuses = poa(poas)
duration = time.perf_counter() - t0
        accuracies = list(starmap(accuracy_with_cov, zip(refs, consensuses)))
print("* mean %.2f%%" % np.mean(accuracies))
print("* median %.2f%%" % np.median(accuracies))
print("* time %.2f" % duration)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model_directory")
parser.add_argument("--directory", default=None)
parser.add_argument("--device", default="cuda")
parser.add_argument("--seed", default=9, type=int)
parser.add_argument("--weights", default="0", type=str)
parser.add_argument("--chunks", default=1000, type=int)
parser.add_argument("--batchsize", default=96, type=int)
parser.add_argument("--beamsize", default=5, type=int)
parser.add_argument("--poa", action="store_true", default=False)
parser.add_argument("--min-coverage", default=0.5, type=float)
return parser
|
from .model import Model
from .basecall import basecall
|
"""
Bonito CTC-CRF Model.
"""
import torch
import numpy as np
from bonito.nn import Module, Convolution, SHABlock, LinearCRFEncoder, Serial, Permute, layers, from_dict
import seqdist.sparse
from seqdist.ctc_simple import logZ_cupy, viterbi_alignments
from seqdist.core import SequenceDist, Max, Log, semiring
def get_stride(m):
if hasattr(m, 'stride'):
return m.stride if isinstance(m.stride, int) else m.stride[0]
if isinstance(m, Convolution):
return get_stride(m.conv)
if isinstance(m, Serial):
return int(np.prod([get_stride(x) for x in m]))
return 1
class CTC_CRF(SequenceDist):
def __init__(self, state_len, alphabet):
super().__init__()
self.alphabet = alphabet
self.state_len = state_len
self.n_base = len(alphabet[1:])
self.idx = torch.cat([
torch.arange(self.n_base**(self.state_len))[:, None],
torch.arange(
self.n_base**(self.state_len)
).repeat_interleave(self.n_base).reshape(self.n_base, -1).T
], dim=1).to(torch.int32)
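        # idx has one row per state (n_base**state_len rows): column 0 is the
        # stay transition back to the same state, and the remaining n_base
        # columns are the predecessor states that move into this state by
        # emitting a base.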
def n_score(self):
return len(self.alphabet) * self.n_base**(self.state_len)
    def logZ(self, scores, S: semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, len(self.alphabet))
alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.logZ(Ms, self.idx, alpha_0, beta_T, S)
def normalise(self, scores):
return (scores - self.logZ(scores)[:, None] / len(scores))
def forward_scores(self, scores, S: semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, self.n_base + 1)
alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.fwd_scores_cupy(Ms, self.idx, alpha_0, S, K=1)
def backward_scores(self, scores, S: semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, self.n_base + 1)
beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.bwd_scores_cupy(Ms, self.idx, beta_T, S, K=1)
def compute_transition_probs(self, scores, betas):
T, N, C = scores.shape
# add bwd scores to edge scores
log_trans_probs = (scores.reshape(T, N, -1, self.n_base + 1) + betas[1:, :, :, None])
# transpose from (new_state, dropped_base) to (old_state, emitted_base) layout
log_trans_probs = torch.cat([
log_trans_probs[:, :, :, [0]],
log_trans_probs[:, :, :, 1:].transpose(3, 2).reshape(T, N, -1, self.n_base)
], dim=-1)
# convert from log probs to probs by exponentiating and normalising
trans_probs = torch.softmax(log_trans_probs, dim=-1)
        # convert first bwd score to initial state probabilities
init_state_probs = torch.softmax(betas[0], dim=-1)
return trans_probs, init_state_probs
def reverse_complement(self, scores):
T, N, C = scores.shape
expand_dims = T, N, *(self.n_base for _ in range(self.state_len)), self.n_base + 1
scores = scores.reshape(*expand_dims)
blanks = torch.flip(scores[..., 0].permute(
0, 1, *range(self.state_len + 1, 1, -1)).reshape(T, N, -1, 1), [0, 2]
)
emissions = torch.flip(scores[..., 1:].permute(
0, 1, *range(self.state_len, 1, -1),
            self.state_len + 2,
self.state_len + 1).reshape(T, N, -1, self.n_base), [0, 2, 3]
)
return torch.cat([blanks, emissions], dim=-1).reshape(T, N, -1)
def viterbi(self, scores):
traceback = self.posteriors(scores, Max)
paths = traceback.argmax(2) % len(self.alphabet)
return paths
def path_to_str(self, path):
alphabet = np.frombuffer(''.join(self.alphabet).encode(), dtype='u1')
seq = alphabet[path[path != 0]]
return seq.tobytes().decode()
def prepare_ctc_scores(self, scores, targets):
# convert from CTC targets (with blank=0) to zero indexed
targets = torch.clamp(targets - 1, 0)
T, N, C = scores.shape
scores = scores.to(torch.float32)
n = targets.size(1) - (self.state_len - 1)
stay_indices = sum(
targets[:, i:n + i] * self.n_base ** (self.state_len - i - 1)
for i in range(self.state_len)
) * len(self.alphabet)
move_indices = stay_indices[:, 1:] + targets[:, :n - 1] + 1
stay_scores = scores.gather(2, stay_indices.expand(T, -1, -1))
move_scores = scores.gather(2, move_indices.expand(T, -1, -1))
return stay_scores, move_scores
def ctc_loss(self, scores, targets, target_lengths, loss_clip=None, reduction='mean', normalise_scores=True):
if normalise_scores:
scores = self.normalise(scores)
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
logz = logZ_cupy(stay_scores, move_scores, target_lengths + 1 - self.state_len)
loss = - (logz / target_lengths)
if loss_clip:
loss = torch.clamp(loss, 0.0, loss_clip)
if reduction == 'mean':
return loss.mean()
elif reduction in ('none', None):
return loss
else:
raise ValueError('Unknown reduction type {}'.format(reduction))
def ctc_viterbi_alignments(self, scores, targets, target_lengths):
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
return viterbi_alignments(stay_scores, move_scores, target_lengths + 1 - self.state_len)
def conv(c_in, c_out, ks, stride=1, bias=False, activation=None):
return Convolution(c_in, c_out, ks, stride=stride, padding=ks//2, bias=bias, activation=activation)
def rnn_encoder(n_base, state_len, insize=1, stride=5, winlen=19, activation='swish', rnn_type='lstm', features=768, scale=5.0, blank_score=None, single_head_attn=False):
rnn = layers[rnn_type]
return Serial([
conv(insize, 4, ks=5, bias=True, activation=activation),
conv(4, 16, ks=5, bias=True, activation=activation),
conv(16, features, ks=winlen, stride=stride, bias=True, activation=activation),
Permute([2, 0, 1]),
rnn(features, features, reverse=True), rnn(features, features),
rnn(features, features, reverse=True), rnn(features, features),
*([SHABlock(features)] if single_head_attn else []),
rnn(features, features, reverse=True),
LinearCRFEncoder(features, n_base, state_len, bias=True, activation='tanh', scale=scale, blank_score=blank_score)
])
class SeqdistModel(Module):
def __init__(self, encoder, seqdist):
super().__init__()
self.seqdist = seqdist
self.encoder = encoder
self.stride = get_stride(encoder)
self.alphabet = seqdist.alphabet
def forward(self, x):
return self.encoder(x).to(torch.float32)
def decode_batch(self, x):
scores = self.seqdist.posteriors(x.to(torch.float32)) + 1e-8
tracebacks = self.seqdist.viterbi(scores.log()).to(torch.int16).T
return [self.seqdist.path_to_str(x) for x in tracebacks.cpu().numpy()]
def decode(self, x):
return self.decode_batch(x.unsqueeze(1))[0]
class Model(SeqdistModel):
def __init__(self, config):
seqdist = CTC_CRF(
state_len=config['global_norm']['state_len'],
alphabet=config['labels']['labels']
)
        if 'type' in config['encoder']:  # new-style config
            encoder = from_dict(config['encoder'])
        else:  # old-style config
            encoder = rnn_encoder(seqdist.n_base, seqdist.state_len, insize=config['input']['features'], **config['encoder'])
super().__init__(encoder, seqdist)
self.config = config
|
"""
Bonito CRF basecall
"""
import torch
import numpy as np
from kbeam import beamsearch
from itertools import groupby
from functools import partial
from operator import itemgetter
import bonito
from bonito.io import Writer
from bonito.fast5 import get_reads
from bonito.aligner import align_map
from bonito.multiprocessing import thread_map, thread_iter
from bonito.util import concat, chunk, batchify, unbatchify, half_supported
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
"""
Stitch chunks together with a given overlap
"""
if isinstance(chunks, dict):
return {
k: stitch(v, chunksize, overlap, length, stride, reverse=reverse)
for k, v in chunks.items()
}
return bonito.util.stitch(chunks, chunksize, overlap, length, stride, reverse=reverse)
def compute_scores(model, batch, reverse=False):
"""
Compute scores for model.
"""
with torch.no_grad():
device = next(model.parameters()).device
dtype = torch.float16 if half_supported() else torch.float32
scores = model(batch.to(dtype).to(device))
if reverse: scores = model.seqdist.reverse_complement(scores)
betas = model.seqdist.backward_scores(scores.to(torch.float32))
betas -= (betas.max(2, keepdim=True)[0] - 5.0)
return {
'scores': scores.transpose(0, 1),
'betas': betas.transpose(0, 1),
}
def quantise_int8(x, scale=127/5):
"""
Quantise scores to int8.
"""
scores = x['scores']
scores *= scale
scores = torch.round(scores).to(torch.int8).detach()
betas = x['betas']
betas *= scale
    betas = torch.round(torch.clamp(betas, -127., 127.)).to(torch.int8).detach()  # int8 tops out at 127; clamping at 128 would wrap on the cast
return {'scores': scores, 'betas': betas}
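# Note on quantise_int8 above: with the default scale of 127/5, scores in
# roughly [-5, 5] map onto the int8 range [-127, 127]; decode_int8 below passes
# the same scale so the beam search can undo the quantisation.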
def transfer(x):
"""
Device to host transfer using pinned memory.
"""
torch.cuda.synchronize()
with torch.cuda.stream(torch.cuda.Stream()):
return {
k: torch.empty(v.shape, pin_memory=True, dtype=v.dtype).copy_(v).numpy()
for k, v in x.items()
}
def decode_int8(scores, seqdist, scale=127/5, beamsize=40, beamcut=100.0):
"""
Beamsearch decode.
"""
path, _ = beamsearch(
scores['scores'], scale, seqdist.n_base, beamsize,
guide=scores['betas'], beam_cut=beamcut
)
try:
return seqdist.path_to_str(path % 4 + 1)
except IndexError:
return ""
def split_read(read, split_read_length=400000):
"""
Split large reads into manageable pieces.
"""
if len(read.signal) <= split_read_length:
return [(read, 0, len(read.signal))]
breaks = np.arange(0, len(read.signal) + split_read_length, split_read_length)
return [(read, start, min(end, len(read.signal))) for (start, end) in zip(breaks[:-1], breaks[1:])]
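# Worked example for split_read: a read with 1,000,000 samples and the default
# split_read_length of 400,000 yields three pieces covering
# (0, 400000), (400000, 800000) and (800000, 1000000).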
def basecall(model, reads, aligner=None, beamsize=40, chunksize=4000, overlap=500, batchsize=32, qscores=False, reverse=False):
"""
Basecalls a set of reads.
"""
_decode = partial(decode_int8, seqdist=model.seqdist, beamsize=beamsize)
reads = (read_chunk for read in reads for read_chunk in split_read(read)[::-1 if reverse else 1])
chunks = (
((read, start, end), chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
for (read, start, end) in reads
)
batches = (
(k, quantise_int8(compute_scores(model, batch, reverse=reverse)))
for k, batch in thread_iter(batchify(chunks, batchsize=batchsize))
)
stitched = (
(read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
for ((read, start, end), x) in unbatchify(batches)
)
transferred = thread_map(transfer, stitched, n_thread=1)
basecalls = thread_map(_decode, transferred, n_thread=8)
basecalls = (
(read, ''.join(seq for k, seq in parts))
for read, parts in groupby(basecalls, lambda x: (x[0].parent if hasattr(x[0], 'parent') else x[0]))
)
basecalls = (
(read, {'sequence': seq, 'qstring': '?' * len(seq) if qscores else '*', 'mean_qscore': 0.0})
for read, seq in basecalls
)
if aligner: return align_map(aligner, basecalls)
return basecalls
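# Pipeline note for basecall above: reads are split into manageable pieces,
# chunked, scored on the GPU and quantised to int8, stitched back to read
# length, transferred to pinned host memory on one thread, then beam-search
# decoded on eight threads before the per-read parts are joined.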
|
from .model import Model
from .basecall import basecall
|
"""
Bonito Model template
"""
import numpy as np
from bonito.nn import Permute, layers
import torch
from torch.nn.functional import log_softmax, ctc_loss
from torch.nn import Module, ModuleList, Sequential, Conv1d, BatchNorm1d, Dropout
from fast_ctc_decode import beam_search, viterbi_search
class Model(Module):
"""
Model template for QuartzNet style architectures
https://arxiv.org/pdf/1910.10261.pdf
"""
def __init__(self, config):
super(Model, self).__init__()
if 'qscore' not in config:
self.qbias = 0.0
self.qscale = 1.0
else:
self.qbias = config['qscore']['bias']
self.qscale = config['qscore']['scale']
self.config = config
self.stride = config['block'][0]['stride'][0]
self.alphabet = config['labels']['labels']
self.features = config['block'][-1]['filters']
self.encoder = Encoder(config)
self.decoder = Decoder(self.features, len(self.alphabet))
def forward(self, x):
encoded = self.encoder(x)
return self.decoder(encoded)
def decode(self, x, beamsize=5, threshold=1e-3, qscores=False, return_path=False):
x = x.exp().cpu().numpy().astype(np.float32)
if beamsize == 1 or qscores:
seq, path = viterbi_search(x, self.alphabet, qscores, self.qscale, self.qbias)
else:
seq, path = beam_search(x, self.alphabet, beamsize, threshold)
if return_path: return seq, path
return seq
def ctc_label_smoothing_loss(self, log_probs, targets, lengths, weights=None):
T, N, C = log_probs.shape
        # `weights or <default>` would raise on a tensor argument; test for None explicitly
        weights = weights if weights is not None else torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)])
log_probs_lengths = torch.full(size=(N, ), fill_value=T, dtype=torch.int64)
loss = ctc_loss(log_probs.to(torch.float32), targets, log_probs_lengths, lengths, reduction='mean')
label_smoothing_loss = -((log_probs * weights.to(log_probs.device)).mean())
return {'loss': loss + label_smoothing_loss, 'ctc_loss': loss, 'label_smooth_loss': label_smoothing_loss}
class Encoder(Module):
"""
Builds the model encoder
"""
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
features = self.config['input']['features']
activation = layers[self.config['encoder']['activation']]()
encoder_layers = []
for layer in self.config['block']:
encoder_layers.append(
Block(
features, layer['filters'], activation,
repeat=layer['repeat'], kernel_size=layer['kernel'],
stride=layer['stride'], dilation=layer['dilation'],
dropout=layer['dropout'], residual=layer['residual'],
separable=layer['separable'],
)
)
features = layer['filters']
self.encoder = Sequential(*encoder_layers)
def forward(self, x):
return self.encoder(x)
class TCSConv1d(Module):
"""
Time-Channel Separable 1D Convolution
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, separable=False):
super(TCSConv1d, self).__init__()
self.separable = separable
if separable:
self.depthwise = Conv1d(
in_channels, in_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=in_channels
)
self.pointwise = Conv1d(
in_channels, out_channels, kernel_size=1, stride=1,
dilation=dilation, bias=bias, padding=0
)
else:
self.conv = Conv1d(
in_channels, out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, bias=bias
)
def forward(self, x):
if self.separable:
x = self.depthwise(x)
x = self.pointwise(x)
else:
x = self.conv(x)
return x
class Block(Module):
"""
TCSConv, Batch Normalisation, Activation, Dropout
"""
def __init__(self, in_channels, out_channels, activation, repeat=5, kernel_size=1, stride=1, dilation=1, dropout=0.0, residual=False, separable=False):
super(Block, self).__init__()
self.use_res = residual
self.conv = ModuleList()
_in_channels = in_channels
padding = self.get_padding(kernel_size[0], stride[0], dilation[0])
# add the first n - 1 convolutions + activation
for _ in range(repeat - 1):
self.conv.extend(
self.get_tcs(
_in_channels, out_channels, kernel_size=kernel_size,
stride=stride, dilation=dilation,
padding=padding, separable=separable
)
)
self.conv.extend(self.get_activation(activation, dropout))
_in_channels = out_channels
# add the last conv and batch norm
self.conv.extend(
self.get_tcs(
_in_channels, out_channels,
kernel_size=kernel_size,
stride=stride, dilation=dilation,
padding=padding, separable=separable
)
)
# add the residual connection
if self.use_res:
self.residual = Sequential(*self.get_tcs(in_channels, out_channels))
# add the activation and dropout
self.activation = Sequential(*self.get_activation(activation, dropout))
def get_activation(self, activation, dropout):
return activation, Dropout(p=dropout)
def get_padding(self, kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Dilation and stride can not both be greater than 1")
return (kernel_size // 2) * dilation
def get_tcs(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, padding=0, bias=False, separable=False):
return [
TCSConv1d(
in_channels, out_channels, kernel_size,
stride=stride, dilation=dilation, padding=padding,
bias=bias, separable=separable
),
BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)
]
def forward(self, x):
_x = x
for layer in self.conv:
_x = layer(_x)
if self.use_res:
_x = _x + self.residual(x)
return self.activation(_x)
class Decoder(Module):
"""
Decoder
"""
def __init__(self, features, classes):
super(Decoder, self).__init__()
self.layers = Sequential(
Conv1d(features, classes, kernel_size=1, bias=True),
Permute([2, 0, 1])
)
def forward(self, x):
return log_softmax(self.layers(x), dim=-1)
|
"""
Bonito basecall
"""
import torch
import numpy as np
from functools import partial
from bonito.fast5 import ReadChunk
from bonito.aligner import align_map
from bonito.multiprocessing import process_map, thread_map
from bonito.util import mean_qscore_from_qstring, half_supported
from bonito.util import chunk, stitch, batchify, unbatchify, permute, concat
def basecall(model, reads, aligner=None, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None):
"""
Basecalls a set of reads.
"""
chunks = (
(read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads
)
scores = unbatchify(
(k, compute_scores(model, v)) for k, v in batchify(chunks, batchsize)
)
scores = (
(read, {'scores': stitch(v, chunksize, overlap, len(read.signal), model.stride)}) for read, v in scores
)
decoder = partial(decode, decode=model.decode, beamsize=beamsize, qscores=qscores)
basecalls = process_map(decoder, scores, n_proc=4)
if aligner: return align_map(aligner, basecalls)
return basecalls
def compute_scores(model, batch):
"""
Compute scores for model.
"""
with torch.no_grad():
device = next(model.parameters()).device
chunks = batch.to(torch.half).to(device)
probs = permute(model(chunks), 'TNC', 'NTC')
return probs.cpu().to(torch.float32)
def decode(scores, decode, beamsize=5, qscores=False):
"""
Convert the network scores into a sequence.
"""
# do a greedy decode to get a sensible qstring to compute the mean qscore from
seq, path = decode(scores['scores'], beamsize=1, qscores=True, return_path=True)
seq, qstring = seq[:len(path)], seq[len(path):]
mean_qscore = mean_qscore_from_qstring(qstring)
# beam search will produce a better sequence but doesn't produce a sensible qstring/path
if not (qscores or beamsize == 1):
try:
seq = decode(scores['scores'], beamsize=beamsize)
path = None
qstring = '*'
        except Exception:  # fall back to the greedy call if beam search fails
pass
return {'sequence': seq, 'qstring': qstring, 'mean_qscore': mean_qscore, 'path': path}
|
from bs_roformer.bs_roformer import BSRoformer
|
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = FlashAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
return self.flash_attn(q, k, v)
# similarity
sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
return out
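# Minimal usage sketch (an assumption, not part of the original module): run
# the einsum fallback path on random tensors of shape (batch, heads, seq, dim).
if __name__ == '__main__':
    attend = Attend(dropout=0., flash=False)
    q, k, v = (torch.randn(1, 8, 16, 64) for _ in range(3))
    out = attend(q, k, v)  # -> torch.Size([1, 8, 16, 64])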
|
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from bs_roformer.attend import Attend
from beartype.typing import Tuple, Optional, List
from beartype import beartype
from rotary_embedding_torch import RotaryEmbedding
from einops import rearrange, pack, unpack
# helper functions
def exists(val):
return val is not None
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
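# Note on RMSNorm above: F.normalize divides by the L2 norm over the feature
# dimension, so the output is x / ||x|| * sqrt(dim) * gamma, a learnable
# rescaling with unit RMS used here in place of LayerNorm.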
# attention
class FeedForward(Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
dim_inner = int(dim * mult)
self.net = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_inner, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
rotary_embed = None,
flash = True
):
super().__init__()
self.heads = heads
        self.scale = dim_head ** -0.5
dim_inner = heads * dim_head
self.rotary_embed = rotary_embed
self.attend = Attend(flash = flash, dropout = dropout)
self.norm = RMSNorm(dim)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(dim_inner, dim, bias = False),
nn.Dropout(dropout)
)
def forward(self, x):
x = self.norm(x)
q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = self.heads)
if exists(self.rotary_embed):
q = self.rotary_embed.rotate_queries_or_keys(q)
k = self.rotary_embed.rotate_queries_or_keys(k)
out = self.attend(q, k, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
norm_output = True,
rotary_embed = None,
flash_attn = True
):
super().__init__()
self.layers = ModuleList([])
for _ in range(depth):
self.layers.append(ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_embed = rotary_embed, flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim) if norm_output else nn.Identity()
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
# bandsplit module
class BandSplit(Module):
@beartype
def __init__(
self,
dim,
dim_inputs: Tuple[int, ...]
):
super().__init__()
self.dim_inputs = dim_inputs
self.to_features = ModuleList([])
for dim_in in dim_inputs:
net = nn.Sequential(
RMSNorm(dim_in),
nn.Linear(dim_in, dim)
)
self.to_features.append(net)
def forward(self, x):
x = x.split(self.dim_inputs, dim = -1)
outs = []
for split_input, to_feature in zip(x, self.to_features):
split_output = to_feature(split_input)
outs.append(split_output)
return torch.stack(outs, dim = -2)
class LinearGLUWithTanH(Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x.tanh() * gate.sigmoid()
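# Note on LinearGLUWithTanH above: the projection doubles the width, then the
# two halves recombine as tanh(a) * sigmoid(b), i.e. a GLU gate with a tanh
# bounded value path, keeping each estimated mask component in (-1, 1).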
class MaskEstimator(Module):
@beartype
def __init__(
self,
dim,
dim_inputs: Tuple[int, ...],
depth
):
super().__init__()
self.dim_inputs = dim_inputs
self.to_freqs = ModuleList([])
for dim_in in dim_inputs:
net = []
for ind in range(depth):
is_last = ind == (depth - 1)
dim_out = dim if not is_last else dim_in
net.append(LinearGLUWithTanH(dim, dim_out))
self.to_freqs.append(nn.Sequential(*net))
def forward(self, x):
x = x.unbind(dim = -2)
outs = []
for band_features, to_freq in zip(x, self.to_freqs):
freq_out = to_freq(band_features)
outs.append(freq_out)
return torch.cat(outs, dim = -1)
# main class
class BSRoformer(Module):
@beartype
def __init__(
self,
dim,
*,
depth,
time_transformer_depth = 2,
freq_transformer_depth = 2,
freqs_per_bands: Tuple[int, ...] = (256, 257), # in the paper, they divide into ~60 bands, test with 1 for starters
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
flash_attn = True,
dim_freqs_in = 513,
stft_n_fft = 1024,
stft_hop_length = 256,
stft_win_length = 1024,
stft_normalized = False,
mask_estimator_depth = 1,
multi_stft_resolution_loss_weight = 1.,
multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
multi_stft_hop_size = 147,
multi_stft_normalized = False
):
super().__init__()
self.layers = ModuleList([])
transformer_kwargs = dict(
dim = dim,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
flash_attn = flash_attn
)
time_rotary_embed = RotaryEmbedding(dim = dim_head)
freq_rotary_embed = RotaryEmbedding(dim = dim_head)
for _ in range(depth):
self.layers.append(nn.ModuleList([
Transformer(depth = time_transformer_depth, rotary_embed = time_rotary_embed, **transformer_kwargs),
Transformer(depth = freq_transformer_depth, rotary_embed = freq_rotary_embed, **transformer_kwargs)
]))
self.stft_kwargs = dict(
n_fft = stft_n_fft,
hop_length = stft_hop_length,
win_length = stft_win_length,
normalized = stft_normalized
)
freqs = torch.stft(torch.randn(1, 1024), **self.stft_kwargs, return_complex = True).shape[1]
assert len(freqs_per_bands) > 1
assert sum(freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings'
freqs_per_bands_with_complex = tuple(2 * f for f in freqs_per_bands)
self.band_split = BandSplit(
dim = dim,
dim_inputs = freqs_per_bands_with_complex
)
self.mask_estimator = MaskEstimator(
dim = dim,
dim_inputs = freqs_per_bands_with_complex,
depth = mask_estimator_depth
)
# for the multi-resolution stft loss
self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
self.multi_stft_n_fft = stft_n_fft
self.multi_stft_kwargs = dict(
hop_length = multi_stft_hop_size,
normalized = multi_stft_normalized
)
def forward(
self,
raw_audio,
target = None,
return_loss_breakdown = False
):
"""
einops
b - batch
f - freq
t - time
c - complex (2)
d - feature dimension
"""
# to stft
stft_repr = torch.stft(raw_audio, **self.stft_kwargs, return_complex = True)
stft_repr = torch.view_as_real(stft_repr)
x = rearrange(stft_repr, 'b f t c -> b t (f c)')
x = self.band_split(x)
# axial / hierarchical attention
for time_transformer, freq_transformer in self.layers:
            x = rearrange(x, 'b t f d -> b f t d')
            x, ps = pack([x], '* t d')
            x = time_transformer(x)
            x, = unpack(x, ps, '* t d')
            x = rearrange(x, 'b f t d -> b t f d')
            x, ps = pack([x], '* f d')
            x = freq_transformer(x)
            x, = unpack(x, ps, '* f d')
mask = self.mask_estimator(x)
mask = rearrange(mask, 'b t (f c) -> b f t c', c = 2)
# modulate frequency representation
stft_repr = stft_repr * mask
# istft
stft_repr = torch.view_as_complex(stft_repr)
recon_audio = torch.istft(stft_repr, **self.stft_kwargs, return_complex = False)
# if a target is passed in, calculate loss for learning
if not exists(target):
return recon_audio
target = target[..., :recon_audio.shape[-1]] # protect against lost length on istft
loss = F.l1_loss(recon_audio, target)
multi_stft_resolution_loss = 0.
for window_size in self.multi_stft_resolutions_window_sizes:
res_stft_kwargs = dict(
n_fft = max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft
win_length = window_size,
return_complex = True,
**self.multi_stft_kwargs,
)
recon_Y = torch.stft(recon_audio, **res_stft_kwargs)
target_Y = torch.stft(target, **res_stft_kwargs)
multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)
weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight
total_loss = loss + weighted_multi_resolution_loss
if not return_loss_breakdown:
return total_loss
return total_loss, (loss, multi_stft_resolution_loss)
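# --- usage sketch (editor's addition, not from the original file) ---
# Minimal smoke test under the default STFT settings: n_fft = 1024 gives 513
# frequency bins, matching the default freqs_per_bands = (256, 257). The
# dim / depth values below are illustrative assumptions, not repo defaults.
if __name__ == '__main__':
    model = BSRoformer(dim = 64, depth = 2)
    raw_audio = torch.randn(1, 44100)           # (batch, samples)
    target = torch.randn(1, 44100)
    recon = model(raw_audio)                    # reconstructed audio, (batch, samples)
    loss = model(raw_audio, target = target)    # l1 + weighted multi-resolution stft loss
    loss.backward()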
|
import random
import torch
import torch.linalg
import numpy as np
class BlackHole(object):
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, name):
return self
def seed_all(seed):
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def recursive_to(obj, device):
if isinstance(obj, torch.Tensor):
try:
return obj.cuda(device=device, non_blocking=True)
except RuntimeError:
return obj.to(device)
elif isinstance(obj, list):
return [recursive_to(o, device=device) for o in obj]
elif isinstance(obj, tuple):
        return tuple(recursive_to(o, device=device) for o in obj)
elif isinstance(obj, dict):
return {k: recursive_to(v, device=device) for k, v in obj.items()}
else:
return obj
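# --- usage sketch (editor's addition) ---
# seed_all fixes every RNG for reproducibility; recursive_to moves arbitrarily
# nested containers of tensors onto a device, passing non-tensor leaves through.
if __name__ == '__main__':
    seed_all(2021)
    batch = {'x': torch.randn(4, 3), 'meta': [torch.zeros(4), 'name']}
    batch = recursive_to(batch, device='cpu')
    print(batch['x'].device)   # cpu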
|
import warnings
import torch
from Bio import BiopythonWarning
from Bio.PDB import Selection
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import three_to_one, three_to_index, is_aa
NON_STANDARD_SUBSTITUTIONS = {
'2AS':'ASP', '3AH':'HIS', '5HP':'GLU', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM':'ALA', 'ALO':'THR', 'ALY':'LYS', 'ARM':'ARG',
'ASA':'ASP', 'ASB':'ASP', 'ASK':'ASP', 'ASL':'ASP', 'ASQ':'ASP', 'AYA':'ALA', 'BCS':'CYS', 'BHD':'ASP', 'BMT':'THR', 'BNN':'ALA',
'BUC':'CYS', 'BUG':'LEU', 'C5C':'CYS', 'C6C':'CYS', 'CAS':'CYS', 'CCS':'CYS', 'CEA':'CYS', 'CGU':'GLU', 'CHG':'ALA', 'CLE':'LEU', 'CME':'CYS',
'CSD':'ALA', 'CSO':'CYS', 'CSP':'CYS', 'CSS':'CYS', 'CSW':'CYS', 'CSX':'CYS', 'CXM':'MET', 'CY1':'CYS', 'CY3':'CYS', 'CYG':'CYS',
'CYM':'CYS', 'CYQ':'CYS', 'DAH':'PHE', 'DAL':'ALA', 'DAR':'ARG', 'DAS':'ASP', 'DCY':'CYS', 'DGL':'GLU', 'DGN':'GLN', 'DHA':'ALA',
'DHI':'HIS', 'DIL':'ILE', 'DIV':'VAL', 'DLE':'LEU', 'DLY':'LYS', 'DNP':'ALA', 'DPN':'PHE', 'DPR':'PRO', 'DSN':'SER', 'DSP':'ASP',
'DTH':'THR', 'DTR':'TRP', 'DTY':'TYR', 'DVA':'VAL', 'EFC':'CYS', 'FLA':'ALA', 'FME':'MET', 'GGL':'GLU', 'GL3':'GLY', 'GLZ':'GLY',
'GMA':'GLU', 'GSC':'GLY', 'HAC':'ALA', 'HAR':'ARG', 'HIC':'HIS', 'HIP':'HIS', 'HMR':'ARG', 'HPQ':'PHE', 'HTR':'TRP', 'HYP':'PRO',
'IAS':'ASP', 'IIL':'ILE', 'IYR':'TYR', 'KCX':'LYS', 'LLP':'LYS', 'LLY':'LYS', 'LTR':'TRP', 'LYM':'LYS', 'LYZ':'LYS', 'MAA':'ALA', 'MEN':'ASN',
'MHS':'HIS', 'MIS':'SER', 'MLE':'LEU', 'MPQ':'GLY', 'MSA':'GLY', 'MSE':'MET', 'MVA':'VAL', 'NEM':'HIS', 'NEP':'HIS', 'NLE':'LEU',
'NLN':'LEU', 'NLP':'LEU', 'NMC':'GLY', 'OAS':'SER', 'OCS':'CYS', 'OMT':'MET', 'PAQ':'TYR', 'PCA':'GLU', 'PEC':'CYS', 'PHI':'PHE',
'PHL':'PHE', 'PR3':'CYS', 'PRR':'ALA', 'PTR':'TYR', 'PYX':'CYS', 'SAC':'SER', 'SAR':'GLY', 'SCH':'CYS', 'SCS':'CYS', 'SCY':'CYS',
'SEL':'SER', 'SEP':'SER', 'SET':'SER', 'SHC':'CYS', 'SHR':'LYS', 'SMC':'CYS', 'SOC':'CYS', 'STY':'TYR', 'SVA':'SER', 'TIH':'ALA',
'TPL':'TRP', 'TPO':'THR', 'TPQ':'ALA', 'TRG':'LYS', 'TRO':'TRP', 'TYB':'TYR', 'TYI':'TYR', 'TYQ':'TYR', 'TYS':'TYR', 'TYY':'TYR'
}
RESIDUE_SIDECHAIN_POSTFIXES = {
'A': ['B'],
'R': ['B', 'G', 'D', 'E', 'Z', 'H1', 'H2'],
'N': ['B', 'G', 'D1', 'D2'],
'D': ['B', 'G', 'D1', 'D2'],
'C': ['B', 'G'],
'E': ['B', 'G', 'D', 'E1', 'E2'],
'Q': ['B', 'G', 'D', 'E1', 'E2'],
'G': [],
'H': ['B', 'G', 'D1', 'D2', 'E1', 'E2'],
'I': ['B', 'G1', 'G2', 'D1'],
'L': ['B', 'G', 'D1', 'D2'],
'K': ['B', 'G', 'D', 'E', 'Z'],
'M': ['B', 'G', 'D', 'E'],
'F': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z'],
'P': ['B', 'G', 'D'],
'S': ['B', 'G'],
'T': ['B', 'G1', 'G2'],
'W': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'E3', 'Z2', 'Z3', 'H2'],
'Y': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z', 'H'],
'V': ['B', 'G1', 'G2'],
}
GLY_INDEX = 5
ATOM_N, ATOM_CA, ATOM_C, ATOM_O, ATOM_CB = 0, 1, 2, 3, 4
def augmented_three_to_one(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return three_to_one(three)
def augmented_three_to_index(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return three_to_index(three)
def augmented_is_aa(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return is_aa(three, standard=True)
def is_hetero_residue(res):
return len(res.id[0].strip()) > 0
def get_atom_name_postfix(atom):
name = atom.get_name()
if name in ('N', 'CA', 'C', 'O'):
return name
if name[-1].isnumeric():
return name[-2:]
else:
return name[-1:]
def get_residue_pos14(res):
pos14 = torch.full([14, 3], float('inf'))
suffix_to_atom = {get_atom_name_postfix(a):a for a in res.get_atoms()}
atom_order = ['N', 'CA', 'C', 'O'] + RESIDUE_SIDECHAIN_POSTFIXES[augmented_three_to_one(res.get_resname())]
for i, atom_suffix in enumerate(atom_order):
if atom_suffix not in suffix_to_atom: continue
pos14[i,0], pos14[i,1], pos14[i,2] = suffix_to_atom[atom_suffix].get_coord().tolist()
return pos14
def parse_pdb(path, model_id=0):
warnings.simplefilter('ignore', BiopythonWarning)
parser = PDBParser()
structure = parser.get_structure(None, path)
return parse_complex(structure, model_id)
def parse_complex(structure, model_id=None):
if model_id is not None:
structure = structure[model_id]
chains = Selection.unfold_entities(structure, 'C')
aa, resseq, icode, seq = [], [], [], []
pos14, pos14_mask = [], []
chain_id, chain_seq = [], []
for i, chain in enumerate(chains):
seq_this = 0
for res in chain:
resname = res.get_resname()
if not augmented_is_aa(resname): continue
if not (res.has_id('CA') and res.has_id('C') and res.has_id('N')): continue
# Chain
chain_id.append(chain.get_id())
chain_seq.append(i+1)
# Residue types
restype = augmented_three_to_index(resname)
aa.append(restype)
# Atom coordinates
pos14_this = get_residue_pos14(res)
pos14_mask_this = pos14_this.isfinite()
pos14.append(pos14_this.nan_to_num(posinf=99999))
pos14_mask.append(pos14_mask_this)
# Sequential number
resseq_this = int(res.get_id()[1])
icode_this = res.get_id()[2]
if seq_this == 0:
seq_this = 1
else:
d_resseq = resseq_this - resseq[-1]
if d_resseq == 0: seq_this += 1
else: seq_this += d_resseq
resseq.append(resseq_this)
icode.append(icode_this)
seq.append(seq_this)
if len(aa) == 0:
return None
return {
'name': structure.get_id(),
# Chain
'chain_id': ''.join(chain_id),
'chain_seq': torch.LongTensor(chain_seq),
# Sequence
'aa': torch.LongTensor(aa),
'resseq': torch.LongTensor(resseq),
'icode': ''.join(icode),
'seq': torch.LongTensor(seq),
# Atom positions
'pos14': torch.stack(pos14),
'pos14_mask': torch.stack(pos14_mask),
}
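# --- usage sketch (editor's addition; 'example.pdb' is a hypothetical path) ---
# parse_pdb returns per-residue tensors: 'aa' holds residue-type indices,
# 'pos14' holds up to 14 heavy-atom coordinates per residue (N, CA, C, O,
# then side-chain atoms), and 'pos14_mask' marks which atom slots are present.
if __name__ == '__main__':
    data = parse_pdb('example.pdb')
    if data is not None:
        print(data['aa'].shape, data['pos14'].shape)   # (L,) and (L, 14, 3)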
|
import math
import torch
from torch.utils.data._utils.collate import default_collate
from .protein import ATOM_CA, parse_pdb
class PaddingCollate(object):
    def __init__(self, length_ref_key='mutation_mask', pad_values={'aa': 20, 'pos14': 999.0, 'icode': ' ', 'chain_id': '-'}, donot_pad={'foldx'}, eight=False):
super().__init__()
self.length_ref_key = length_ref_key
self.pad_values = pad_values
self.donot_pad = donot_pad
self.eight = eight
def _pad_last(self, x, n, value=0):
if isinstance(x, torch.Tensor):
assert x.size(0) <= n
if x.size(0) == n:
return x
pad_size = [n - x.size(0)] + list(x.shape[1:])
pad = torch.full(pad_size, fill_value=value).to(x)
return torch.cat([x, pad], dim=0)
elif isinstance(x, list):
pad = [value] * (n - len(x))
return x + pad
elif isinstance(x, str):
if value == 0: # Won't pad strings if not specified
return x
pad = value * (n - len(x))
return x + pad
elif isinstance(x, dict):
padded = {}
for k, v in x.items():
if k in self.donot_pad:
padded[k] = v
else:
padded[k] = self._pad_last(v, n, value=self._get_pad_value(k))
return padded
else:
return x
@staticmethod
def _get_pad_mask(l, n):
return torch.cat([
torch.ones([l], dtype=torch.bool),
torch.zeros([n-l], dtype=torch.bool)
], dim=0)
def _get_pad_value(self, key):
if key not in self.pad_values:
return 0
return self.pad_values[key]
def __call__(self, data_list):
max_length = max([data[self.length_ref_key].size(0) for data in data_list])
if self.eight:
max_length = math.ceil(max_length / 8) * 8
data_list_padded = []
for data in data_list:
data_padded = {
k: self._pad_last(v, max_length, value=self._get_pad_value(k))
for k, v in data.items() if k in ('wt', 'mut', 'ddG', 'mutation_mask', 'index', 'mutation')
}
data_padded['mask'] = self._get_pad_mask(data[self.length_ref_key].size(0), max_length)
data_list_padded.append(data_padded)
return default_collate(data_list_padded)
def _mask_list(l, mask):
return [l[i] for i in range(len(l)) if mask[i]]
def _mask_string(s, mask):
return ''.join([s[i] for i in range(len(s)) if mask[i]])
def _mask_dict_recursively(d, mask):
out = {}
for k, v in d.items():
if isinstance(v, torch.Tensor) and v.size(0) == mask.size(0):
out[k] = v[mask]
elif isinstance(v, list) and len(v) == mask.size(0):
out[k] = _mask_list(v, mask)
elif isinstance(v, str) and len(v) == mask.size(0):
out[k] = _mask_string(v, mask)
elif isinstance(v, dict):
out[k] = _mask_dict_recursively(v, mask)
else:
out[k] = v
return out
class KnnResidue(object):
def __init__(self, num_neighbors=128):
super().__init__()
self.num_neighbors = num_neighbors
def __call__(self, data):
pos_CA = data['wt']['pos14'][:, ATOM_CA]
pos_CA_mut = pos_CA[data['mutation_mask']]
diff = pos_CA_mut.view(1, -1, 3) - pos_CA.view(-1, 1, 3)
dist = torch.linalg.norm(diff, dim=-1)
try:
mask = torch.zeros([dist.size(0)], dtype=torch.bool)
mask[ dist.min(dim=1)[0].argsort()[:self.num_neighbors] ] = True
except IndexError as e:
print(data)
raise e
return _mask_dict_recursively(data, mask)
def load_wt_mut_pdb_pair(wt_path, mut_path):
data_wt = parse_pdb(wt_path)
data_mut = parse_pdb(mut_path)
transform = KnnResidue()
collate_fn = PaddingCollate()
mutation_mask = (data_wt['aa'] != data_mut['aa'])
batch = collate_fn([transform({'wt': data_wt, 'mut': data_mut, 'mutation_mask': mutation_mask})])
return batch
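# --- usage sketch (editor's addition, synthetic batch) ---
# PaddingCollate pads every whitelisted field to the longest 'mutation_mask'
# in the batch and adds a boolean 'mask' marking the real (non-pad) positions.
if __name__ == '__main__':
    collate = PaddingCollate()
    batch = collate([
        {'mutation_mask': torch.zeros(5, dtype=torch.bool)},
        {'mutation_mask': torch.zeros(8, dtype=torch.bool)},
    ])
    print(batch['mutation_mask'].shape, batch['mask'].shape)   # (2, 8) and (2, 8)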
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.residue import PerResidueEncoder
from models.attention import GAEncoder
from models.common import get_pos_CB, construct_3d_basis
from utils.protein import ATOM_N, ATOM_CA, ATOM_C
class ComplexEncoder(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.relpos_embedding = nn.Embedding(cfg.max_relpos*2+2, cfg.pair_feat_dim)
self.residue_encoder = PerResidueEncoder(cfg.node_feat_dim)
if cfg.geomattn is not None:
self.ga_encoder = GAEncoder(
node_feat_dim = cfg.node_feat_dim,
pair_feat_dim = cfg.pair_feat_dim,
num_layers = cfg.geomattn.num_layers,
spatial_attn_mode = cfg.geomattn.spatial_attn_mode,
)
else:
self.out_mlp = nn.Sequential(
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim),
)
def forward(self, pos14, aa, seq, chain, mask_atom):
"""
Args:
pos14: (N, L, 14, 3).
aa: (N, L).
seq: (N, L).
chain: (N, L).
mask_atom: (N, L, 14)
Returns:
(N, L, node_ch)
"""
same_chain = (chain[:, None, :] == chain[:, :, None]) # (N, L, L)
relpos = (seq[:, None, :] - seq[:, :, None]).clamp(min=-self.cfg.max_relpos, max=self.cfg.max_relpos) + self.cfg.max_relpos # (N, L, L)
relpos = torch.where(same_chain, relpos, torch.full_like(relpos, fill_value=self.cfg.max_relpos*2+1))
pair_feat = self.relpos_embedding(relpos) # (N, L, L, pair_ch)
R = construct_3d_basis(pos14[:, :, ATOM_CA], pos14[:, :, ATOM_C], pos14[:, :, ATOM_N])
# Residue encoder
res_feat = self.residue_encoder(aa, pos14, mask_atom)
# Geom encoder
t = pos14[:, :, ATOM_CA]
mask_residue = mask_atom[:, :, ATOM_CA]
res_feat = self.ga_encoder(R, t, get_pos_CB(pos14, mask_atom), res_feat, pair_feat, mask_residue)
return res_feat
class DDGReadout(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(feat_dim*2, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim)
)
self.project = nn.Linear(feat_dim, 1, bias=False)
def forward(self, node_feat_wt, node_feat_mut, mask=None):
"""
Args:
node_feat_wt: (N, L, F).
node_feat_mut: (N, L, F).
mask: (N, L).
"""
feat_wm = torch.cat([node_feat_wt, node_feat_mut], dim=-1)
feat_mw = torch.cat([node_feat_mut, node_feat_wt], dim=-1)
feat_diff = self.mlp(feat_wm) - self.mlp(feat_mw) # (N, L, F)
# feat_diff = self.mlp(node_feat_wt) - self.mlp(node_feat_mut)
per_residue_ddg = self.project(feat_diff).squeeze(-1) # (N, L)
if mask is not None:
per_residue_ddg = per_residue_ddg * mask
ddg = per_residue_ddg.sum(dim=1) # (N,)
return ddg
class DDGPredictor(nn.Module):
def __init__(self, cfg):
super().__init__()
self.encoder = ComplexEncoder(cfg)
self.ddG_readout = DDGReadout(cfg.node_feat_dim)
def forward(self, complex_wt, complex_mut, ddG_true=None):
mask_atom_wt = complex_wt['pos14_mask'].all(dim=-1) # (N, L, 14)
mask_atom_mut = complex_mut['pos14_mask'].all(dim=-1)
feat_wt = self.encoder(complex_wt['pos14'], complex_wt['aa'], complex_wt['seq'], complex_wt['chain_seq'], mask_atom_wt)
feat_mut = self.encoder(complex_mut['pos14'], complex_mut['aa'], complex_mut['seq'], complex_mut['chain_seq'], mask_atom_mut)
mask_res = mask_atom_wt[:, :, ATOM_CA]
ddG_pred = self.ddG_readout(feat_wt, feat_mut, mask_res) # One mask is enough
if ddG_true is None:
return ddG_pred
else:
losses = {
'ddG': F.mse_loss(ddG_pred, ddG_true),
}
return losses, ddG_pred
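# --- usage sketch (editor's addition) ---
# DDGPredictor needs a config exposing the attributes used above; the
# SimpleNamespace below is a hypothetical stand-in for the checkpoint config.
# Feeding the same complex as wt and mut yields ddG = 0, because the readout
# is antisymmetric: mlp(cat(wt, mut)) - mlp(cat(mut, wt)).
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        node_feat_dim=128, pair_feat_dim=64, max_relpos=32,
        geomattn=SimpleNamespace(num_layers=2, spatial_attn_mode='CB'),
    )
    model = DDGPredictor(cfg)
    N, L = 2, 16
    complex_ = {
        'pos14': torch.randn(N, L, 14, 3),
        'pos14_mask': torch.ones(N, L, 14, 3, dtype=torch.bool),
        'aa': torch.randint(0, 20, (N, L)),
        'seq': torch.arange(L).expand(N, L),
        'chain_seq': torch.ones(N, L, dtype=torch.long),
    }
    print(model(complex_, complex_))   # (N,) tensor of zeros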
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .common import mask_zero, global_to_local, local_to_global, normalize_vector
def _alpha_from_logits(logits, mask, inf=1e5):
"""
Args:
logits: Logit matrices, (N, L_i, L_j, num_heads).
mask: Masks, (N, L).
Returns:
        alpha: Attention weights, (N, L, L, num_heads).
"""
N, L, _, _ = logits.size()
mask_row = mask.view(N, L, 1, 1).expand_as(logits) # (N, L, *, *)
mask_pair = mask_row * mask_row.permute(0, 2, 1, 3) # (N, L, L, *)
logits = torch.where(mask_pair, logits, logits-inf)
alpha = torch.softmax(logits, dim=2) # (N, L, L, num_heads)
alpha = torch.where(mask_row, alpha, torch.zeros_like(alpha))
return alpha
def _heads(x, n_heads, n_ch):
"""
Args:
x: (..., num_heads * num_channels)
Returns:
(..., num_heads, num_channels)
"""
s = list(x.size())[:-1] + [n_heads, n_ch]
return x.view(*s)
class GeometricAttention(nn.Module):
def __init__(self, node_feat_dim, pair_feat_dim, spatial_attn_mode='CB', value_dim=16, query_key_dim=16, num_query_points=8, num_value_points=8, num_heads=12):
super().__init__()
self.node_feat_dim = node_feat_dim
self.pair_feat_dim = pair_feat_dim
self.value_dim = value_dim
self.query_key_dim = query_key_dim
self.num_query_points = num_query_points
self.num_value_points = num_value_points
self.num_heads = num_heads
assert spatial_attn_mode in ('CB', 'vpoint')
self.spatial_attn_mode = spatial_attn_mode
# Node
self.proj_query = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
self.proj_key = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
self.proj_value = nn.Linear(node_feat_dim, value_dim*num_heads, bias=False)
# Pair
self.proj_pair_bias = nn.Linear(pair_feat_dim, num_heads, bias=False)
# Spatial
self.spatial_coef = nn.Parameter(torch.full([1, 1, 1, self.num_heads], fill_value=np.log(np.exp(1.) - 1.)), requires_grad=True)
if spatial_attn_mode == 'vpoint':
self.proj_query_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
self.proj_key_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
self.proj_value_point = nn.Linear(node_feat_dim, num_value_points*num_heads*3, bias=False)
# Output
if spatial_attn_mode == 'CB':
self.out_transform = nn.Linear(
in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*(3+3+1)),
out_features = node_feat_dim,
)
elif spatial_attn_mode == 'vpoint':
self.out_transform = nn.Linear(
in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*num_value_points*(3+3+1)),
out_features = node_feat_dim,
)
self.layer_norm = nn.LayerNorm(node_feat_dim)
def _node_logits(self, x):
query_l = _heads(self.proj_query(x), self.num_heads, self.query_key_dim) # (N, L, n_heads, qk_ch)
key_l = _heads(self.proj_key(x), self.num_heads, self.query_key_dim) # (N, L, n_heads, qk_ch)
query_l = query_l.permute(0, 2, 1, 3) # (N,L1,H,C) -> (N,H,L1,C)
key_l = key_l.permute(0, 2, 3, 1) # (N,L2,H,C) -> (N,H,C,L2)
logits = torch.matmul(query_l, key_l) # (N,H,L1,L2)
logits = logits.permute(0, 2, 3, 1) # (N,L1,L2,H)
# logits = (query_l.unsqueeze(2) * key_l.unsqueeze(1) * (1 / np.sqrt(self.query_key_dim))).sum(-1) # (N, L, L, num_heads)
return logits
def _pair_logits(self, z):
logits_pair = self.proj_pair_bias(z)
return logits_pair
def _beta_logits(self, R, t, p_CB):
N, L, _ = t.size()
qk = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3)
sum_sq_dist = ((qk.unsqueeze(2) - qk.unsqueeze(1)) ** 2).sum(-1) # (N, L, L, n_heads)
gamma = F.softplus(self.spatial_coef)
        logits_beta = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / 9)) / 2)
        return logits_beta
def _spatial_logits(self, R, t, x):
N, L, _ = t.size()
# Query
query_points = _heads(self.proj_query_point(x), self.num_heads*self.num_query_points, 3) # (N, L, n_heads * n_pnts, 3)
query_points = local_to_global(R, t, query_points) # Global query coordinates, (N, L, n_heads * n_pnts, 3)
query_s = query_points.reshape(N, L, self.num_heads, -1) # (N, L, n_heads, n_pnts*3)
# Key
        key_points = _heads(self.proj_key_point(x), self.num_heads*self.num_query_points, 3) # (N, L, n_heads * n_pnts, 3)
key_points = local_to_global(R, t, key_points) # Global key coordinates, (N, L, n_heads * n_pnts, 3)
key_s = key_points.reshape(N, L, self.num_heads, -1) # (N, L, n_heads, n_pnts*3)
# Q-K Product
sum_sq_dist = ((query_s.unsqueeze(2) - key_s.unsqueeze(1)) ** 2).sum(-1) # (N, L, L, n_heads)
gamma = F.softplus(self.spatial_coef)
logits_spatial = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / (9 * self.num_query_points))) / 2) # (N, L, L, n_heads)
return logits_spatial
def _pair_aggregation(self, alpha, z):
N, L = z.shape[:2]
feat_p2n = alpha.unsqueeze(-1) * z.unsqueeze(-2) # (N, L, L, n_heads, C)
feat_p2n = feat_p2n.sum(dim=2) # (N, L, n_heads, C)
return feat_p2n.reshape(N, L, -1)
def _node_aggregation(self, alpha, x):
N, L = x.shape[:2]
        value_l = _heads(self.proj_value(x), self.num_heads, self.value_dim) # (N, L, n_heads, v_ch)
        feat_node = alpha.unsqueeze(-1) * value_l.unsqueeze(1) # (N, L, L, n_heads, 1) * (N, 1, L, n_heads, v_ch)
feat_node = feat_node.sum(dim=2) # (N, L, n_heads, v_ch)
return feat_node.reshape(N, L, -1)
def _beta_aggregation(self, alpha, R, t, p_CB, x):
N, L, _ = t.size()
v = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3) # (N, L, n_heads, 3)
aggr = alpha.reshape(N, L, L, self.num_heads, 1) * v.unsqueeze(1) # (N, *, L, n_heads, 3)
aggr = aggr.sum(dim=2)
feat_points = global_to_local(R, t, aggr) # (N, L, n_heads, 3)
feat_distance = feat_points.norm(dim=-1)
feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4)
feat_spatial = torch.cat([
feat_points.reshape(N, L, -1),
feat_distance.reshape(N, L, -1),
feat_direction.reshape(N, L, -1),
], dim=-1)
return feat_spatial
def _spatial_aggregation(self, alpha, R, t, x):
N, L, _ = t.size()
value_points = _heads(self.proj_value_point(x), self.num_heads*self.num_value_points, 3) # (N, L, n_heads * n_v_pnts, 3)
value_points = local_to_global(R, t, value_points.reshape(N, L, self.num_heads, self.num_value_points, 3)) # (N, L, n_heads, n_v_pnts, 3)
aggr_points = alpha.reshape(N, L, L, self.num_heads, 1, 1) * value_points.unsqueeze(1) # (N, *, L, n_heads, n_pnts, 3)
aggr_points = aggr_points.sum(dim=2) # (N, L, n_heads, n_pnts, 3)
feat_points = global_to_local(R, t, aggr_points) # (N, L, n_heads, n_pnts, 3)
feat_distance = feat_points.norm(dim=-1) # (N, L, n_heads, n_pnts)
feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4) # (N, L, n_heads, n_pnts, 3)
feat_spatial = torch.cat([
feat_points.reshape(N, L, -1),
feat_distance.reshape(N, L, -1),
feat_direction.reshape(N, L, -1),
], dim=-1)
return feat_spatial
def forward_beta(self, R, t, p_CB, x, z, mask):
"""
Args:
R: Frame basis matrices, (N, L, 3, 3_index).
t: Frame external (absolute) coordinates, (N, L, 3).
x: Node-wise features, (N, L, F).
z: Pair-wise features, (N, L, L, C).
mask: Masks, (N, L).
Returns:
x': Updated node-wise features, (N, L, F).
"""
# Attention logits
logits_node = self._node_logits(x)
logits_pair = self._pair_logits(z)
logits_spatial = self._beta_logits(R, t, p_CB)
        # Sum the logits and apply softmax.
logits_sum = logits_node + logits_pair + logits_spatial
alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask) # (N, L, L, n_heads)
# Aggregate features
feat_p2n = self._pair_aggregation(alpha, z)
feat_node = self._node_aggregation(alpha, x)
feat_spatial = self._beta_aggregation(alpha, R, t, p_CB, x)
# Finally
feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1)) # (N, L, F)
feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
x_updated = self.layer_norm(x + feat_all)
return x_updated
def forward_vpoint(self, R, t, p_CB, x, z, mask):
"""
Args:
R: Frame basis matrices, (N, L, 3, 3_index).
t: Frame external (absolute) coordinates, (N, L, 3).
x: Node-wise features, (N, L, F).
z: Pair-wise features, (N, L, L, C).
mask: Masks, (N, L).
Returns:
x': Updated node-wise features, (N, L, F).
"""
# Attention logits
logits_node = self._node_logits(x)
logits_pair = self._pair_logits(z)
logits_spatial = self._spatial_logits(R, t, x)
        # Sum the logits and apply softmax.
logits_sum = logits_node + logits_pair + logits_spatial
alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask) # (N, L, L, n_heads)
# Aggregate features
feat_p2n = self._pair_aggregation(alpha, z)
feat_node = self._node_aggregation(alpha, x)
feat_spatial = self._spatial_aggregation(alpha, R, t, x)
# Finally
feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1)) # (N, L, F)
feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
x_updated = self.layer_norm(x + feat_all)
return x_updated
def forward(self, R, t, p_CB, x, z, mask):
if self.spatial_attn_mode == 'CB':
return self.forward_beta(R, t, p_CB, x, z, mask)
else:
return self.forward_vpoint(R, t, p_CB, x, z, mask)
class GAEncoder(nn.Module):
def __init__(self, node_feat_dim, pair_feat_dim, num_layers, spatial_attn_mode='CB'):
super().__init__()
self.blocks = nn.ModuleList([
GeometricAttention(node_feat_dim, pair_feat_dim, spatial_attn_mode=spatial_attn_mode)
for _ in range(num_layers)
])
def forward(self, R, t, p_CB, x, z, mask):
for block in self.blocks:
x = block(R, t, p_CB, x, z, mask) # Residual connection within the block
return x
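# --- shape check (editor's addition, random inputs) ---
# GeometricAttention sums three logit sources (node, pair, spatial), softmaxes
# them jointly, and returns updated node features of unchanged shape.
if __name__ == '__main__':
    N, L, node_dim, pair_dim = 2, 8, 32, 16
    attn = GeometricAttention(node_dim, pair_dim, spatial_attn_mode='CB')
    R = torch.eye(3).expand(N, L, 3, 3)           # identity frames
    t = torch.randn(N, L, 3)                      # frame origins
    p_CB = torch.randn(N, L, 3)                   # C-beta positions
    x = torch.randn(N, L, node_dim)
    z = torch.randn(N, L, L, pair_dim)
    mask = torch.ones(N, L, dtype=torch.bool)
    assert attn(R, t, p_CB, x, z, mask).shape == (N, L, node_dim)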
|
import torch
import torch.nn as nn
from models.common import PositionalEncoding, construct_3d_basis, global_to_local
class PerResidueEncoder(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.aatype_embed = nn.Embedding(21, feat_dim)
self.torsion_embed = PositionalEncoding()
self.mlp = nn.Sequential(
nn.Linear(21*14*3 + feat_dim, feat_dim * 2), nn.ReLU(),
nn.Linear(feat_dim * 2, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim)
)
def forward(self, aa, pos14, atom_mask):
"""
Args:
aa: (N, L).
pos14: (N, L, 14, 3).
atom_mask: (N, L, 14).
"""
N, L = aa.size()
R = construct_3d_basis(pos14[:, :, 1], pos14[:, :, 2], pos14[:, :, 0]) # (N, L, 3, 3)
t = pos14[:, :, 1] # (N, L, 3)
crd14 = global_to_local(R, t, pos14) # (N, L, 14, 3)
crd14_mask = atom_mask[:, :, :, None].expand_as(crd14)
crd14 = torch.where(crd14_mask, crd14, torch.zeros_like(crd14))
        # scatter the local coordinates into a one-hot residue-type axis, so
        # each of the 21 amino acid types gets its own (14, 3) coordinate slot
        aa_expand = aa[:, :, None, None, None].expand(N, L, 21, 14, 3)
        rng_expand = torch.arange(0, 21)[None, None, :, None, None].expand(N, L, 21, 14, 3).to(aa_expand)
        place_mask = (aa_expand == rng_expand)
        crd_expand = crd14[:, :, None, :, :].expand(N, L, 21, 14, 3)
        crd_expand = torch.where(place_mask, crd_expand, torch.zeros_like(crd_expand))
        crd_feat = crd_expand.reshape(N, L, 21 * 14 * 3)
aa_feat = self.aatype_embed(aa) # (N, L, feat)
out_feat = self.mlp(torch.cat([crd_feat, aa_feat], dim=-1))
return out_feat
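# --- shape check (editor's addition, random inputs) ---
# The encoder scatters local atom coordinates over a one-hot residue-type
# axis (21 * 14 * 3 features) and concatenates a learned type embedding.
if __name__ == '__main__':
    enc = PerResidueEncoder(feat_dim=64)
    N, L = 2, 10
    aa = torch.randint(0, 21, (N, L))
    pos14 = torch.randn(N, L, 14, 3)
    atom_mask = torch.ones(N, L, 14, dtype=torch.bool)
    assert enc(aa, pos14, atom_mask).shape == (N, L, 64)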
|
import torch
import torch.nn as nn
from utils.protein import ATOM_CA, ATOM_CB
def get_pos_CB(pos14, atom_mask):
"""
Args:
pos14: (N, L, 14, 3)
atom_mask: (N, L, 14)
"""
N, L = pos14.shape[:2]
mask_CB = atom_mask[:, :, ATOM_CB] # (N, L)
mask_CB = mask_CB[:, :, None].expand(N, L, 3)
pos_CA = pos14[:, :, ATOM_CA] # (N, L, 3)
pos_CB = pos14[:, :, ATOM_CB]
return torch.where(mask_CB, pos_CB, pos_CA)
def mask_zero(mask, value):
return torch.where(mask, value, torch.zeros_like(value))
class PositionalEncoding(nn.Module):
def __init__(self, num_funcs=6):
super().__init__()
self.num_funcs = num_funcs
self.register_buffer('freq_bands', 2.0 ** torch.linspace(0.0, num_funcs-1, num_funcs))
def get_out_dim(self, in_dim):
return in_dim * (2 * self.num_funcs + 1)
def forward(self, x):
"""
Args:
x: (..., d).
"""
shape = list(x.shape[:-1]) + [-1]
x = x.unsqueeze(-1) # (..., d, 1)
code = torch.cat([x, torch.sin(x * self.freq_bands), torch.cos(x * self.freq_bands)], dim=-1) # (..., d, 2f+1)
code = code.reshape(shape)
return code
def safe_norm(x, dim=-1, keepdim=False, eps=1e-8, sqrt=True):
out = torch.clamp(torch.sum(torch.square(x), dim=dim, keepdim=keepdim), min=eps)
return torch.sqrt(out) if sqrt else out
def normalize_vector(v, dim, eps=1e-6):
return v / (torch.linalg.norm(v, ord=2, dim=dim, keepdim=True) + eps)
def project_v2v(v, e, dim):
"""
Description:
Project vector `v` onto vector `e`.
Args:
v: (N, L, 3).
e: (N, L, 3).
"""
return (e * v).sum(dim=dim, keepdim=True) * e
def construct_3d_basis(center, p1, p2):
"""
Args:
center: (N, L, 3), usually the position of C_alpha.
p1: (N, L, 3), usually the position of C.
p2: (N, L, 3), usually the position of N.
    Returns:
        A batch of orthogonal basis matrices, (N, L, 3, 3_index).
The matrix is composed of 3 column vectors: [e1, e2, e3].
"""
v1 = p1 - center # (N, L, 3)
e1 = normalize_vector(v1, dim=-1)
v2 = p2 - center # (N, L, 3)
u2 = v2 - project_v2v(v2, e1, dim=-1)
e2 = normalize_vector(u2, dim=-1)
e3 = torch.cross(e1, e2, dim=-1) # (N, L, 3)
mat = torch.cat([
e1.unsqueeze(-1), e2.unsqueeze(-1), e3.unsqueeze(-1)
], dim=-1) # (N, L, 3, 3_index)
return mat
def local_to_global(R, t, p):
"""
Description:
Convert local (internal) coordinates to global (external) coordinates q.
q <- Rp + t
Args:
R: (N, L, 3, 3).
t: (N, L, 3).
p: Local coordinates, (N, L, ..., 3).
Returns:
q: Global coordinates, (N, L, ..., 3).
"""
assert p.size(-1) == 3
p_size = p.size()
N, L = p_size[0], p_size[1]
p = p.view(N, L, -1, 3).transpose(-1, -2) # (N, L, *, 3) -> (N, L, 3, *)
q = torch.matmul(R, p) + t.unsqueeze(-1) # (N, L, 3, *)
q = q.transpose(-1, -2).reshape(p_size) # (N, L, 3, *) -> (N, L, *, 3) -> (N, L, ..., 3)
return q
def global_to_local(R, t, q):
"""
Description:
Convert global (external) coordinates q to local (internal) coordinates p.
p <- R^{T}(q - t)
Args:
R: (N, L, 3, 3).
t: (N, L, 3).
q: Global coordinates, (N, L, ..., 3).
Returns:
p: Local coordinates, (N, L, ..., 3).
"""
assert q.size(-1) == 3
q_size = q.size()
N, L = q_size[0], q_size[1]
q = q.reshape(N, L, -1, 3).transpose(-1, -2) # (N, L, *, 3) -> (N, L, 3, *)
if t is None:
p = torch.matmul(R.transpose(-1, -2), q) # (N, L, 3, *)
else:
p = torch.matmul(R.transpose(-1, -2), (q - t.unsqueeze(-1))) # (N, L, 3, *)
p = p.transpose(-1, -2).reshape(q_size) # (N, L, 3, *) -> (N, L, *, 3) -> (N, L, ..., 3)
return p
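# --- consistency check (editor's addition) ---
# local_to_global and global_to_local should be exact inverses (up to float
# error) given the orthonormal frames built by construct_3d_basis.
if __name__ == '__main__':
    N, L = 2, 5
    center, p1, p2 = torch.randn(3, N, L, 3).unbind(0)
    R = construct_3d_basis(center, p1, p2)
    p = torch.randn(N, L, 4, 3)                   # arbitrary local points
    q = local_to_global(R, center, p)
    assert torch.allclose(global_to_local(R, center, q), p, atol=1e-4)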
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import argparse
import torch
from models.predictor import DDGPredictor
from utils.misc import *
from utils.data import *
from utils.protein import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('wt_pdb', type=str)
parser.add_argument('mut_pdb', type=str)
parser.add_argument('--model', type=str, default='./data/model.pt')
parser.add_argument('--device', type=str, default='cuda')
args = parser.parse_args()
batch = load_wt_mut_pdb_pair(args.wt_pdb, args.mut_pdb)
batch = recursive_to(batch, args.device)
ckpt = torch.load(args.model)
config = ckpt['config']
weight = ckpt['model']
model = DDGPredictor(config.model).to(args.device)
model.load_state_dict(weight)
with torch.no_grad():
model.eval()
pred = model(batch['wt'], batch['mut'])
print('Predicted ddG: %.2f' % pred.item())
|
from aoa_pytorch.aoa_pytorch import AttentionOnAttention
AoA = AttentionOnAttention
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class AttentionOnAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
aoa_dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.dropout = nn.Dropout(dropout)
self.aoa = nn.Sequential(
nn.Linear(2 * inner_dim, 2 * dim),
nn.GLU(),
nn.Dropout(aoa_dropout)
)
def forward(self, x, context = None):
h = self.heads
q_ = self.to_q(x)
context = default(context, x)
kv = self.to_kv(context).chunk(2, dim = -1)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q_, *kv))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# attention
attn = dots.softmax(dim = -1)
attn = self.dropout(attn)
# weighted average of values
attn_out = einsum('b h i j, b h j d -> b h i d', attn, v)
# concat heads
out = rearrange(attn_out, 'b h n d -> b n (h d)', h = h)
# attention on attention
out = self.aoa(torch.cat((out, q_), dim = -1))
return out
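# --- usage sketch (editor's addition) ---
# Attention-on-Attention gates the attended values with the original queries
# through a GLU; works as self-attention or, with `context`, cross-attention.
if __name__ == '__main__':
    attn = AttentionOnAttention(dim = 512)
    x = torch.randn(1, 1024, 512)
    out = attn(x)                                  # (1, 1024, 512)
    context = torch.randn(1, 256, 512)
    out = attn(x, context = context)               # same output shape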
|
from adjacent_attention_network.adjacent_attention_network import AdjacentAttentionNetwork
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from isab_pytorch import ISAB
# helpers
def exists(val):
return val is not None
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, **kwargs):
return self.net(x)
# adjacent attention class
class AdjacentAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 4,
dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.null_k = nn.Parameter(torch.randn(heads, dim_head))
self.null_v = nn.Parameter(torch.randn(heads, dim_head))
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
adj_kv_indices,
mask
):
b, n, d, h = *x.shape, self.heads
flat_indices = repeat(adj_kv_indices, 'b n a -> (b h) (n a)', h = h)
# derive query, key, value
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# gather keys and values according to adjacency matrix
k, v = map(lambda t: rearrange(t, 'b h n d -> (b h) n d'), (k, v))
k = batched_index_select(k, flat_indices)
v = batched_index_select(v, flat_indices)
k, v = map(lambda t: rearrange(t, '(b h) (n a) d -> b h n a d', h = h, n = n), (k, v))
# add null key / value, so a node can attend to nothing
        # have come across this in the GNN literature under some other name
nk, nv = map(lambda t: rearrange(t, 'h d -> () h () () d').expand(b, -1, n, 1, -1), (self.null_k, self.null_v))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
mask = F.pad(mask, (1, 0), value = 1)
# similarity of each node to its neighbors
sim = einsum('b h n d, b h n a d -> b h n a', q, k) * self.scale
# mask out neighbors that are just padding
mask_value = -torch.finfo(sim.dtype).max
        mask = rearrange(mask.bool(), 'b n a -> b () n a')
        sim.masked_fill_(~mask, mask_value)
# attention
attn = sim.softmax(dim = -1)
# dropout
attn = self.dropout(attn)
# get weighted average of the values of all neighbors
out = einsum('b h n a, b h n a d -> b h n d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
# combine output
return self.to_out(out)
# adjacent network (layers of adjacent attention)
class AdjacentAttentionNetwork(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 4,
num_neighbors_cutoff = None,
num_global_nodes = 0,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.num_neighbors_cutoff = num_neighbors_cutoff
self.layers = nn.ModuleList([])
for _ in range(depth):
global_attn = PreNorm(dim, ISAB(
dim = dim,
heads = heads,
num_induced_points = num_global_nodes
)) if num_global_nodes > 0 else None
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, AdjacentAttention(
dim = dim,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout
))),
global_attn,
Residual(PreNorm(dim, FeedForward(
dim = dim,
dropout = ff_dropout
)))
]))
def forward(self, x, adjacency_mat, mask = None):
device, n = x.device, x.shape[1]
diag = torch.eye(adjacency_mat.shape[-1], device = device).bool()
        adjacency_mat |= diag # nodes should attend to themselves (self-interacting)
# zero out points on adjacency matrix
# where the nodes are just padding
if exists(mask):
adjacency_mat &= (mask[:, :, None] * mask[:, None, :])
adj_mat = adjacency_mat.float()
        # if no hard cutoff is set on the number of neighbors:
        # - find the maximum neighbor count and pad all other nodes up to it
        # else:
        # - randomly sample `num_neighbors_cutoff` neighbors for any node that exceeds the cutoff
        # - this is similar to random sparse attention (bigbird)
# get the maximum number of neighbors
max_neighbors = int(adj_mat.sum(dim = -1).max())
if exists(self.num_neighbors_cutoff) and max_neighbors > self.num_neighbors_cutoff:
# to randomly sample the neighbors, add a small uniform noise to the mask and topk
noise = torch.empty((n, n), device = device).uniform_(-0.01, 0.01)
adj_mat = adj_mat + noise
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = self.num_neighbors_cutoff)
# cast the mask back to 0s and 1s
adj_mask = (adj_mask > 0.5).float()
else:
# todo - get distribution of number of neighbors, and strategically break up attention (message passing) to multiple steps
# - start with a bimodal num neighbors test case, then generalize
# use topk to get all the neighbors
# also pass the mask into the attention, as some neighbors will be just padding and not actually neighbors
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = max_neighbors)
for attn, global_attn, ff in self.layers:
x = attn(
x,
adj_kv_indices = adj_kv_indices,
mask = adj_mask
)
if exists(global_attn):
out, _ = global_attn(x, mask = mask)
x = x + out
x = ff(x)
return x
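# --- usage sketch (editor's addition, random graph) ---
# Each node attends only to its adjacency-matrix neighbors, plus itself and a
# learned null key / value; the hyperparameters below are illustrative.
if __name__ == '__main__':
    model = AdjacentAttentionNetwork(dim = 512, depth = 6, heads = 4)
    adj_mat = torch.empty(1, 1024, 1024).uniform_(0, 1) < 0.1
    nodes = torch.randn(1, 1024, 512)
    mask = torch.ones(1, 1024, dtype = torch.bool)
    out = model(nodes, adj_mat, mask = mask)       # (1, 1024, 512)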
|
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM, logging
from tf_bind_transformer.cache_utils import cache_fn, run_once
logging.set_verbosity_error()
def exists(val):
return val is not None
def map_values(fn, dictionary):
return {k: fn(v) for k, v in dictionary.items()}
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
print('calculating context embed only on cpu')
MODELS = dict(
pubmed = dict(
dim = 768,
path = 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
)
)
GLOBAL_VARIABLES = dict(model = None, tokenizer = None)
def get_contextual_dim(model_name):
assert model_name in MODELS
return MODELS[model_name]['dim']
@run_once('init_transformer')
def init_transformer(model_name):
path = MODELS[model_name]['path']
GLOBAL_VARIABLES['tokenizer'] = AutoTokenizer.from_pretrained(path)
model = AutoModelForMaskedLM.from_pretrained(path)
if not CONTEXT_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = model
@torch.no_grad()
def tokenize_text(
text,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True
):
init_transformer(model_name)
model = GLOBAL_VARIABLES['model']
tokenizer = GLOBAL_VARIABLES['tokenizer']
encoding = tokenizer.batch_encode_plus(
[text],
add_special_tokens = True,
padding = True,
truncation = True,
max_length = max_length,
return_attention_mask = True,
return_tensors = 'pt'
)
if not CONTEXT_EMBED_USE_CPU:
encoding = map_values(lambda t: t.cuda(), encoding)
model.eval()
with torch.no_grad():
outputs = model(**encoding, output_hidden_states = True)
hidden_state = outputs.hidden_states[hidden_state_index][0]
if return_cls_token:
return hidden_state[0]
return hidden_state.mean(dim = 0)
def get_text_repr(
texts,
*,
device,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True,
):
assert model_name in MODELS, f'{model_name} not found in available text transformers to use'
if isinstance(texts, str):
texts = [texts]
get_context_repr_fn = cache_fn(tokenize_text, path = f'contexts/{model_name}')
representations = [get_context_repr_fn(text, max_length = max_length, model_name = model_name, hidden_state_index = hidden_state_index, return_cls_token = return_cls_token) for text in texts]
return torch.stack(representations).to(device)
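# --- usage sketch (editor's addition) ---
# get_text_repr embeds free-text context with PubMedBERT, caching each text's
# embedding on disk via cache_fn. The model is downloaded on first use; set
# the CONTEXT_EMBED_USE_CPU environment variable to keep it off the GPU.
if __name__ == '__main__':
    embeds = get_text_repr(
        ['regulatory T-cells', 'embryonic stem cells'],
        model_name = 'pubmed',
        device = torch.device('cpu')
    )
    print(embeds.shape)   # (2, 768) with return_cls_token = True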
|
from chroma_pytorch.chroma_pytorch import Chroma
|