# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEURT's Tensorflow ops."""
from bleurt import checkpoint as checkpoint_lib
from bleurt.lib import optimization
import numpy as np
from scipy import stats
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tf_slim import metrics
from bleurt.lib import modeling
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
# BLEURT flags.
flags.DEFINE_string("bleurt_checkpoint_name", "bert_custom",
"Name of the BLEURT export to be created.")
flags.DEFINE_string("init_bleurt_checkpoint", None,
"Existing BLEURT export to be fine-tuned.")
# BERT flags.
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer(
"max_seq_length", None,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("dynamic_seq_length", True,
"Exports model with dymaic sequence length.")
# Flags to control training setup.
flags.DEFINE_enum("export_metric", "kendalltau", ["correlation", "kendalltau"],
"Metric to chose the best model in export functions.")
flags.DEFINE_integer("shuffle_buffer_size", 500,
"Size of buffer used to shuffle the examples.")
# Flags to control optimization.
flags.DEFINE_enum("optimizer", "adam", ["adam", "sgd", "adagrad"],
"Which optimizer to use.")
flags.DEFINE_float("learning_rate", 1e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_bool(
"use_ranking_loss", False,
"Whether to use a ranking loss instead of regression (l2 loss).")
# BLEURT model flags.
flags.DEFINE_integer("n_hidden_layers", 0,
"Number of fully connected/RNN layers before prediction.")
flags.DEFINE_integer("hidden_layers_width", 128, "Width of hidden layers.")
flags.DEFINE_float("dropout_rate", 0,
"Probability of dropout over BERT embedding.")
flags.DEFINE_float("random_seed", 55555, "Random seed for TensorFlow.")
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, use_one_hot_embeddings, n_hidden_layers,
hidden_layers_width, dropout_rate):
"""Creates a regression model, loosely adapted from language/bert.
Args:
bert_config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: int32 Tensor of shape [batch_size, seq_length].
segment_ids: int32 Tensor of shape [batch_size, seq_length].
labels: float32 Tensor of shape [batch_size].
use_one_hot_embeddings: Whether to use one-hot word embeddings or
tf.embedding_lookup() for the word embeddings.
n_hidden_layers: number of FC layers before prediction.
hidden_layers_width: width of FC layers.
dropout_rate: probability of dropout over BERT embedding.
Returns:
loss: <float32>[]
per_example_loss: <float32>[batch_size]
pred: <float32>[batch_size]
"""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# <float>[batch_size, hidden_size]
output_layer = model.get_pooled_output()
bert_embed_size = output_layer.shape[-1]
logging.info("BERT embedding width: {}".format(str(bert_embed_size)))
if is_training and dropout_rate > 0:
# Implements dropout on top of BERT's pooled output.
# <float32>[batch_size, hidden_size]
output_layer = tf.nn.dropout(output_layer, rate=dropout_rate)
# Hidden layers
for i in range(n_hidden_layers):
# <float32>[batch_size, hidden_layers_width]
logging.info("Adding hidden layer {}".format(i + 1))
output_layer = tf.layers.dense(
output_layer, hidden_layers_width, activation=tf.nn.relu)
logging.info("Building linear output...")
# <float32>[batch_size,1]
predictions = tf.layers.dense(
output_layer, 1, bias_initializer=tf.constant_initializer(0.15))
# <float32>[batch_size]
predictions = tf.squeeze(predictions, 1)
# <float32>[batch_size]
if FLAGS.use_ranking_loss:
per_example_loss = ranking_loss(predictions, labels)
else:
per_example_loss = tf.pow(predictions - labels, 2)
# <float32> []
loss = tf.reduce_mean(per_example_loss, axis=-1)
return (loss, per_example_loss, predictions)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, n_hidden_layers,
hidden_layers_width, dropout_rate):
"""Returns `model_fn` closure."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator/TPUEstimator."""
logging.info("*** Building Regression BERT Model ***")
tf.set_random_seed(FLAGS.random_seed)
logging.info("*** Features ***")
for name in sorted(features.keys()):
logging.info(" name = %s, shape = %s", name, features[name].shape)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
if mode != tf_estimator.ModeKeys.PREDICT:
scores = features["score"]
else:
scores = tf.zeros(tf.shape(input_ids)[0])
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
total_loss, per_example_loss, pred = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, scores,
use_one_hot_embeddings, n_hidden_layers, hidden_layers_width,
dropout_rate)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
# Loads pretrained model
logging.info("**** Initializing from {} ****".format(init_checkpoint))
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
if use_tpu:
output_spec = tf_estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
else:
output_spec = tf_estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf_estimator.ModeKeys.EVAL:
if use_tpu:
eval_metrics = (metric_fn, [per_example_loss, pred, scores])
        output_spec = tf_estimator.tpu.TPUEstimatorSpec(
            mode=mode, loss=total_loss, eval_metrics=eval_metrics)
else:
output_spec = tf_estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=metric_fn(per_example_loss, pred, scores))
elif mode == tf_estimator.ModeKeys.PREDICT:
output_spec = tf_estimator.EstimatorSpec(
mode=mode, predictions={"predictions": pred})
return output_spec
return model_fn
def ranking_loss(predictions, labels):
"""Ranking loss as described in https://arxiv.org/pdf/2009.01325.pdf."""
# We found that clipping the predictions during training helps, since the
# ranking loss itself does not constrain the predictions.
# TODO(tsellam): Understand why clipping helps.
predictions = tf.clip_by_value(predictions, 0.0, 1.0)
# Gets pairs of predictions and pairs of labels in the same order.
ii, jj = tf.meshgrid(
tf.range(0,
tf.shape(predictions)[0]),
tf.range(0,
tf.shape(predictions)[0]),
indexing="ij")
indices = tf.stack([tf.reshape(ii, [-1]), tf.reshape(jj, [-1])], axis=1)
indices = tf.boolean_mask(indices, indices[:, 0] < indices[:, 1])
prediction_pairs = tf.gather(predictions, indices, axis=0)
label_pairs = tf.gather(labels, indices, axis=0)
# For each pair, the loss is - log(sigmoid(s_i - s_j)) where i is the better
# translation according to the labels and s_k denotes the predicted score
# for translation k.
score_diffs = (prediction_pairs[:, 0] - prediction_pairs[:, 1]) * (
tf.cast(label_pairs[:, 0] > label_pairs[:, 1], tf.float32) * 2 - 1)
per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(score_diffs), logits=score_diffs)
return per_example_loss
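# Hedged numeric sketch (an illustrative addition, not part of the original
# module): for predictions [0.8, 0.2] and labels [1.0, 0.0], the single pair
# (i=0, j=1) yields score_diff = (0.8 - 0.2) * (+1) = 0.6 and a per-pair loss
# of -log(sigmoid(0.6)) ~= 0.437. Correctly ordered pairs with a large margin
# drive the loss towards 0; inverted pairs drive it up.
def _demo_ranking_loss():
  """Illustrative only; returns a <float32>[1] tensor close to [0.437]."""
  predictions = tf.constant([0.8, 0.2])
  labels = tf.constant([1.0, 0.0])
  return ranking_loss(predictions, labels)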
# TF ops to compute the metrics.
def concat_tensors(predictions, ratings, sources=None):
"""Concatenates batches of ratings and predictions."""
concat_predictions_value, concat_predictions_update = (
metrics.streaming_concat(predictions))
concat_labels_value, concat_labels_update = (
metrics.streaming_concat(ratings))
if sources is None:
return (concat_predictions_value, concat_labels_value,
tf.group(concat_predictions_update, concat_labels_update))
concat_sources_value, concat_sources_update = (
metrics.streaming_concat(sources))
return (concat_predictions_value, concat_labels_value, concat_sources_value,
tf.group(concat_predictions_update, concat_labels_update,
concat_sources_update))
def kendall_tau_metric(predictions, ratings, weights=None):
"""Builds the computation graph for Kendall Tau metric."""
def _kendall_tau(x, y):
tau = stats.kendalltau(x, y)[0]
return np.array(tau).astype(np.float32)
if weights is not None:
predictions = tf.boolean_mask(predictions, weights)
ratings = tf.boolean_mask(ratings, weights)
with tf.variable_scope("kendall_tau"):
concat_predictions_value, concat_labels_value, update_op = (
concat_tensors(predictions, ratings))
metric_value = tf.reshape(
tf.numpy_function(_kendall_tau,
[concat_predictions_value, concat_labels_value],
tf.float32),
shape=[])
return metric_value, update_op
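# Hedged sketch (an illustrative addition, not part of the original module):
# the tf.numpy_function above defers to scipy at evaluation time, so the
# metric's semantics are exactly those of scipy.stats.kendalltau applied to
# the concatenated predictions and ratings.
def _demo_kendall_tau():
  """Illustrative only: one discordant pair out of three gives tau = 1/3."""
  tau = stats.kendalltau([1.0, 2.0, 3.0], [0.1, 0.3, 0.2])[0]
  return np.array(tau).astype(np.float32)  # ~0.333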
def metric_fn(per_example_loss, pred, ratings):
"""Metrics for BLEURT experiments."""
# Mean of predictions
mean_pred = tf.metrics.mean(values=pred)
  # Spread of the predictions (mean absolute deviation, despite the `_sd`
  # suffix in the metric name below).
mean = tf.reduce_mean(pred)
diffs = tf.sqrt(tf.pow(pred - mean, 2))
pred_sd = tf.metrics.mean(values=diffs)
# Average squared error
mean_loss = tf.metrics.mean(values=per_example_loss)
# Average absolute error
squared_diff = tf.pow(pred - ratings, 2)
per_example_err = tf.sqrt(squared_diff)
mean_err = tf.metrics.mean(per_example_err)
# Pearson correlation
correlation = metrics.streaming_pearson_correlation(pred, ratings)
# Kendall Tau
kendalltau = kendall_tau_metric(pred, ratings)
output = {
"eval_loss": mean_loss,
"eval_mean_err": mean_err,
"eval_mean_pred": mean_pred,
"eval_pred_sd": pred_sd,
"correlation": correlation,
"kendalltau": kendalltau,
}
return output
def input_fn_builder(tfrecord_file,
seq_length,
is_training,
batch_size,
drop_remainder=True):
"""Creates an `input_fn` closure to be passed to Estimator."""
logging.info(
"Creating input fun with batch_size: {} and drop remainder: {}".format(
str(batch_size), str(drop_remainder)))
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"score": tf.FixedLenFeature([], tf.float32)
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params): # pylint: disable=unused-argument
"""Acutal data generator."""
tfrecord_file_expanded = tf.io.gfile.glob(tfrecord_file)
n_files = len(tfrecord_file_expanded)
if n_files > 1:
logging.info("Found {} files matching {}".format(
str(n_files), tfrecord_file))
d = tf.data.TFRecordDataset(tfrecord_file_expanded)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
d = d.map(lambda record: _decode_record(record, name_to_features))
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
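# Usage sketch (an illustrative addition, not part of the original module):
# the closure returned by `input_fn_builder` is what gets handed to the
# Estimator; calling it directly yields a batched tf.data.Dataset of parsed
# features. The file name and shapes below are hypothetical.
def _demo_input_fn(tfrecord_file="/tmp/ratings.tfrecord"):
  """Illustrative only."""
  input_fn = input_fn_builder(
      tfrecord_file,
      seq_length=128,
      is_training=False,
      batch_size=32,
      drop_remainder=False)
  return input_fn(params={})  # tf.data.Dataset of {input_ids, ..., score}.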
def _model_comparator(best_eval_result, current_eval_result):
metric = FLAGS.export_metric
return best_eval_result[metric] <= current_eval_result[metric]
def _serving_input_fn_builder(seq_length):
"""Input function for exported models."""
# We had to use `tf.zeros` instead of the usual
# `tf.placeholder(tf.int64, shape=[None, seq_length])` to be compatible with
# TF2's eager mode, which deprecates all calls to `tf.placeholder`.
if tf.executing_eagerly():
assert not FLAGS.dynamic_seq_length, (
"Training with `dynamic_seq_length is not supported in Eager mode.")
name_to_features = {
"input_ids": tf.zeros(dtype=tf.int64, shape=[0, seq_length]),
"input_mask": tf.zeros(dtype=tf.int64, shape=[0, seq_length]),
"segment_ids": tf.zeros(dtype=tf.int64, shape=[0, seq_length])
}
else:
if FLAGS.dynamic_seq_length:
logging.info("Exporting a model with dynamic sequence length.")
name_to_features = {
"input_ids": tf.placeholder(tf.int64, shape=[None, None]),
"input_mask": tf.placeholder(tf.int64, shape=[None, None]),
"segment_ids": tf.placeholder(tf.int64, shape=[None, None])
}
else:
name_to_features = {
"input_ids": tf.placeholder(tf.int64, shape=[None, seq_length]),
"input_mask": tf.placeholder(tf.int64, shape=[None, seq_length]),
"segment_ids": tf.placeholder(tf.int64, shape=[None, seq_length])
}
return tf_estimator.export.build_raw_serving_input_receiver_fn(
name_to_features)
def run_finetuning(train_tfrecord,
dev_tfrecord,
train_eval_fun=None,
use_tpu=False,
multi_eval_names=None,
additional_train_params=None):
"""Main function to train and eval BLEURT."""
logging.info("Initializing BLEURT training pipeline.")
bleurt_params = checkpoint_lib.get_bleurt_params_from_flags_or_ckpt()
max_seq_length = bleurt_params["max_seq_length"]
bert_config_file = bleurt_params["bert_config_file"]
init_checkpoint = bleurt_params["init_checkpoint"]
logging.info("Creating input data pipeline.")
logging.info("Train batch size: {}, eval batch size: {}".format(
str(FLAGS.batch_size), str(FLAGS.eval_batch_size)))
logging.info("Train data: {}".format(train_tfrecord))
train_input_fn = input_fn_builder(
train_tfrecord,
seq_length=max_seq_length,
is_training=True,
batch_size=FLAGS.batch_size,
drop_remainder=use_tpu)
additional_eval_specs = None
if isinstance(dev_tfrecord, str):
logging.info("Validation data: {}".format(dev_tfrecord))
dev_input_fn = input_fn_builder(
dev_tfrecord,
seq_length=max_seq_length,
is_training=False,
batch_size=FLAGS.eval_batch_size,
drop_remainder=use_tpu)
elif isinstance(dev_tfrecord, list) and dev_tfrecord:
logging.info("Validation data: {}".format(",".join(dev_tfrecord)))
dev_input_fn = input_fn_builder(
dev_tfrecord[0],
seq_length=max_seq_length,
is_training=False,
batch_size=FLAGS.eval_batch_size,
drop_remainder=use_tpu)
additional_eval_specs = []
for i in range(1, len(dev_tfrecord)):
additional_dev_input_fn = input_fn_builder(
dev_tfrecord[i],
seq_length=max_seq_length,
is_training=False,
batch_size=FLAGS.eval_batch_size,
drop_remainder=use_tpu)
eval_name = multi_eval_names[i] if multi_eval_names and len(
multi_eval_names) > i else "eval_%s" % i
additional_eval_specs.append(
tf_estimator.EvalSpec(
name=eval_name,
input_fn=additional_dev_input_fn,
steps=FLAGS.num_eval_steps))
logging.info("len(additional_eval_specs): ", len(additional_eval_specs))
else:
raise ValueError(
"dev_tfrecord can only be a string or list: {}".format(dev_tfrecord))
logging.info("Creating model.")
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
num_train_steps = FLAGS.num_train_steps
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=use_tpu,
use_one_hot_embeddings=use_tpu,
n_hidden_layers=FLAGS.n_hidden_layers,
hidden_layers_width=FLAGS.hidden_layers_width,
dropout_rate=FLAGS.dropout_rate)
logging.info("Creating TF Estimator.")
exporters = [
tf_estimator.BestExporter(
"bleurt_best",
serving_input_receiver_fn=_serving_input_fn_builder(max_seq_length),
event_file_pattern="eval_default/*.tfevents.*",
compare_fn=_model_comparator,
exports_to_keep=3)
]
tf.enable_resource_variables()
logging.info("*** Entering the Training / Eval phase ***")
if not additional_train_params:
additional_train_params = {}
train_eval_fun(
model_fn=model_fn,
train_input_fn=train_input_fn,
eval_input_fn=dev_input_fn,
additional_eval_specs=additional_eval_specs,
exporters=exporters,
**additional_train_params)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scoring function."""
import os
from bleurt import score
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
references = [
"An apple a day keeps the doctor away.",
"An apple a day keeps the doctor away."
]
candidates = [
"An apple a day keeps the doctor away.",
"An apple a day keeps doctors away."
]
ref_scores = [0.910811, 0.771989]
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
class ScoreTest(tf.test.TestCase):
def test_default_bleurt_score(self):
bleurt = score.BleurtScorer()
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_positional_args_error(self):
bleurt = score.BleurtScorer()
with self.assertRaises(AssertionError):
_ = bleurt.score(references, candidates)
def test_bleurt_nulls(self):
bleurt = score.BleurtScorer()
test_references = []
test_candidates = []
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 0)
def test_bleurt_empty(self):
bleurt = score.BleurtScorer()
test_references = [""]
test_candidates = [""]
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 1)
def test_bleurt_score_with_checkpoint(self):
checkpoint = get_test_checkpoint()
bleurt = score.BleurtScorer(checkpoint)
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_tf_bleurt_score_eager(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
bleurt_out = bleurt_ops(references=tfreferences, candidates=tfcandidates)
# Computes the BLEURT scores.
self.assertIn("predictions", bleurt_out)
self.assertEqual(bleurt_out["predictions"].shape, (2,))
self.assertAllClose(bleurt_out["predictions"], ref_scores)
def test_tf_bleurt_positional_args_error(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
with self.assertRaises(AssertionError):
_ = bleurt_ops(tfreferences, tfcandidates)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Fine-tunes a BERT/BLEURT checkpoint."""
import os
from bleurt import checkpoint as checkpoint_lib
from bleurt import encoding
from bleurt import model
from bleurt.lib import experiment_utils
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
# Data pipeline.
flags.DEFINE_string("train_set", None,
"Path to JSONL file for the training ratings.")
flags.DEFINE_string("dev_set", None, "Path to JSONL file for the dev ratings.")
flags.DEFINE_string(
"serialized_train_set", None,
"Target file where the serialized train set will be"
" created. Will use a temp file if None.")
flags.DEFINE_string(
"serialized_dev_set", None,
"Target file where the serialized dev set will be"
" created. Will use a temp file if None.")
# See model.py and lib/experiment_utils.py for other important flags.
def run_finetuning_pipeline(train_set, dev_set, run_in_lazy_mode=True):
"""Runs the full BLEURT fine-tuning pipeline."""
if run_in_lazy_mode:
tf.disable_eager_execution()
bleurt_params = checkpoint_lib.get_bleurt_params_from_flags_or_ckpt()
# Preprocessing and encoding for train and dev set.
logging.info("*** Running pre-processing pipeline for training examples.")
if FLAGS.serialized_train_set:
train_tfrecord = FLAGS.serialized_train_set
else:
train_tfrecord = train_set + ".tfrecord"
encoding.encode_and_serialize(
train_set,
train_tfrecord,
vocab_file=bleurt_params["vocab_file"],
do_lower_case=bleurt_params["do_lower_case"],
sp_model=bleurt_params["sp_model"],
max_seq_length=bleurt_params["max_seq_length"])
logging.info("*** Running pre-processing pipeline for eval examples.")
if FLAGS.serialized_dev_set:
dev_tfrecord = FLAGS.serialized_dev_set
else:
dev_tfrecord = dev_set + ".tfrecord"
encoding.encode_and_serialize(
dev_set,
dev_tfrecord,
vocab_file=bleurt_params["vocab_file"],
do_lower_case=bleurt_params["do_lower_case"],
sp_model=bleurt_params["sp_model"],
max_seq_length=bleurt_params["max_seq_length"])
# Actual fine-tuning work.
logging.info("*** Running fine-tuning.")
train_eval_fun = experiment_utils.run_experiment
model.run_finetuning(train_tfrecord, dev_tfrecord, train_eval_fun)
# Deletes temp files.
if not FLAGS.serialized_train_set:
logging.info("Deleting serialized training examples.")
tf.io.gfile.remove(train_tfrecord)
if not FLAGS.serialized_dev_set:
logging.info("Deleting serialized dev examples.")
tf.io.gfile.remove(dev_tfrecord)
# Gets export location.
glob_pattern = os.path.join(FLAGS.model_dir, "export", "bleurt_best", "*")
export_dirs = tf.io.gfile.glob(glob_pattern)
assert export_dirs, "Model export directory not found."
export_dir = export_dirs[0]
# Finalizes the BLEURT checkpoint.
logging.info("Exporting BLEURT checkpoint to {}.".format(export_dir))
checkpoint_lib.finalize_bleurt_checkpoint(export_dir)
return export_dir
def main(_):
if FLAGS.dynamic_seq_length:
logging.info("Dynamic seq length requested - disabling Eager Mode.")
tf.disable_eager_execution()
assert FLAGS.train_set, "Need to specify a train set."
assert FLAGS.dev_set, "Need to specify a dev set."
run_finetuning_pipeline(FLAGS.train_set, FLAGS.dev_set)
if __name__ == "__main__":
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scoring functions with TF1-style lazy execution."""
import os
from bleurt import score
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
references = [
"An apple a day keeps the doctor away.",
"An apple a day keeps the doctor away."
]
candidates = [
"An apple a day keeps the doctor away.",
"An apple a day keeps doctors away."
]
ref_scores = [0.910811, 0.771989]
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
class ScoreTest(tf.test.TestCase):
def test_default_bleurt_score(self):
bleurt = score.BleurtScorer()
scores = bleurt.score(references=references, candidates=candidates)
bleurt.close()
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_tf_bleurt_score_not_eager(self):
with self.session(graph=tf.Graph()) as session:
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
bleurt_scores = bleurt_ops(references=references, candidates=candidates)
# Runs init.
init_op = tf.group(tf.global_variables_initializer(),
tf.tables_initializer())
session.run(init_op)
# Computes the BLEURT scores.
bleurt_out = session.run(bleurt_scores)
self.assertIn("predictions", bleurt_out)
self.assertEqual(bleurt_out["predictions"].shape, (2,))
self.assertAllClose(bleurt_out["predictions"], ref_scores)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEURT scoring library."""
import os
from bleurt import checkpoint as checkpoint_lib
from bleurt import encoding
from bleurt.lib import tokenizers
import numpy as np
import tensorflow as tf
logging = tf.compat.v1.logging
DEFAULT_BLEURT_BATCH_SIZE = 16
def _get_default_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt), (
"Default checkpoint not found! Are you sure the install is complete?")
return ckpt
class Predictor(object):
"""Base class for different types of predictors."""
def initialize(self):
pass
def predict(self, input_dict):
raise NotImplementedError()
def close(self):
pass
class EagerPredictor(Predictor):
"""Runs a BLEURT model in eager mode. Recommended by default."""
def __init__(self, checkpoint):
logging.info("Creating Eager Mode predictor.")
assert tf.executing_eagerly()
self.checkpoint = checkpoint
def initialize(self):
logging.info("Loading model.")
imported = tf.saved_model.load(self.checkpoint)
self._bleurt_model_ops = imported.signatures["serving_default"]
def predict(self, input_dict):
predictions = self._bleurt_model_ops(
input_ids=tf.constant(input_dict["input_ids"], dtype=tf.int64),
input_mask=tf.constant(input_dict["input_mask"], dtype=tf.int64),
segment_ids=tf.constant(input_dict["segment_ids"],
dtype=tf.int64))["predictions"].numpy()
return predictions
class LazyPredictor(Predictor):
"""Runs a BLEURT model in lazy mode, with TF1-style tf.Sessions."""
def __init__(self, checkpoint):
logging.info("Creating Lazy Mode predictor.")
logging.warn("Using Tensorflow Sessions---please call `.close()` when you "
"are are done using the BleurtScorer.")
assert not tf.executing_eagerly()
self.checkpoint = checkpoint
def initialize(self):
"""Creates the computation graph and the session."""
logging.info("Loading model.")
self._bleurt_graph = tf.Graph()
with self._bleurt_graph.as_default():
imported = tf.saved_model.load(self.checkpoint)
bleurt_model_ops = imported.signatures["serving_default"]
self._bleurt_ops = bleurt_model_ops(
input_ids=tf.compat.v1.placeholder(tf.int64, name="input_ids"),
input_mask=tf.compat.v1.placeholder(tf.int64, name="input_mask"),
segment_ids=tf.compat.v1.placeholder(tf.int64, name="segment_ids"))
init_op = tf.group(tf.compat.v1.global_variables_initializer(),
tf.compat.v1.tables_initializer())
self.session = tf.compat.v1.Session(graph=self._bleurt_graph)
self.session.run(init_op)
logging.info("Done.")
def predict(self, input_dict):
with self._bleurt_graph.as_default():
bleurt_out = self.session.run(
self._bleurt_ops, {
"input_ids:0": input_dict["input_ids"],
"input_mask:0": input_dict["input_mask"],
"segment_ids:0": input_dict["segment_ids"],
})
return bleurt_out["predictions"]
def close(self):
self.session.close()
class PythonPredictor(Predictor):
"""Wrapper around a Python function."""
def __init__(self, predict_fn):
logging.info("Creating Python-based predictor.")
self.predict_fn = predict_fn
def predict(self, input_dict):
return self.predict_fn(input_dict)
def _create_predictor(checkpoint=None, predict_fn=None):
assert checkpoint or predict_fn
if predict_fn:
return PythonPredictor(predict_fn)
if tf.executing_eagerly():
return EagerPredictor(checkpoint)
else:
return LazyPredictor(checkpoint)
# Python API for BLEURT.
class BleurtScorer(object):
"""Class for scoring the BLEURT-similarity between two sentences."""
def __init__(self, checkpoint=None, predict_fn=None):
"""Initializes the BLEURT model.
Args:
checkpoint: BLEURT checkpoint. Will default to BLEURT-tiny if None.
predict_fn: (optional) prediction function, overrides chkpt_dir. Mostly
used for testing.
Returns:
A BLEURT scorer export.
"""
if not checkpoint:
logging.info("No checkpoint specified, defaulting to BLEURT-tiny.")
checkpoint = _get_default_checkpoint()
logging.info("Reading checkpoint {}.".format(checkpoint))
self.config = checkpoint_lib.read_bleurt_config(checkpoint)
max_seq_length = self.config["max_seq_length"]
vocab_file = self.config["vocab_file"]
do_lower_case = self.config["do_lower_case"]
sp_model = self.config["sp_model"]
logging.info("Creating BLEURT scorer.")
self.tokenizer = tokenizers.create_tokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case, sp_model=sp_model)
self.max_seq_length = max_seq_length
self._predictor = _create_predictor(checkpoint, predict_fn)
self._predictor.initialize()
logging.info("BLEURT initialized.")
def score(self, *args, references=[], candidates=[], batch_size=None):
"""Scores a collection of references and candidates.
Args:
*args: dummy collection of positional arguments.
references: a list of strings.
candidates: a list of strings.
batch_size: number of pairs to process per call to `predict_fn`. A high
value makes the eval speedier but also more memory-intensive.
Returns:
A list of scores.
"""
assert not args, (
"The score function does not accept positional arguments. Please "
"specify the name of the arguments explicitly, i.e., "
"`score(references=..., candidates=...`)")
candidates, references = list(candidates), list(references)
assert len(candidates) == len(references), (
"The number of candidate sentences must match the number of "
"reference sentences.")
if not candidates:
return []
if not batch_size:
batch_size = DEFAULT_BLEURT_BATCH_SIZE
all_results = []
for i in range(0, len(candidates), batch_size):
batch_ref = references[i:i + batch_size]
batch_cand = candidates[i:i + batch_size]
input_ids, input_mask, segment_ids = encoding.encode_batch(
batch_ref, batch_cand, self.tokenizer, self.max_seq_length)
tf_input = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids
}
predict_out = self._predictor.predict(tf_input)
batch_results = predict_out.tolist()
all_results.extend(batch_results)
assert len(all_results) == len(candidates), (
"Number of predictions does not match sentences: {} vs. {}".format(
len(all_results), len(candidates)))
return all_results
def close(self):
self._predictor.close()
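# Minimal usage sketch (an illustrative addition, not part of the original
# module): with no checkpoint argument, the scorer falls back to the bundled
# test checkpoint, and `score` must be called with keyword arguments.
def _demo_bleurt_scorer():
  """Illustrative only; returns one float per (reference, candidate) pair."""
  scorer = BleurtScorer()
  scores = scorer.score(
      references=["An apple a day keeps the doctor away."],
      candidates=["An apple a day keeps doctors away."])
  scorer.close()
  return scores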
class LengthBatchingBleurtScorer(BleurtScorer):
"""Experimental implementation of uniform length batching.
Inspired by BERTscore https://github.com/Tiiiger/bert_score.
A nice explanation may be found here:
  https://towardsdatascience.com/divide-hugging-face-transformers-training-time-by-2-or-more-21bf7129db9e
It is not clear to whom the technique should be attributed.
"""
DEFAULT_SCORE = -10000.0
def __init__(self, checkpoint=None, predict_fn=None):
super().__init__(checkpoint, predict_fn)
assert self.config["dynamic_seq_length"] or predict_fn, (
"The checkpoint does not support dynamic sequence lengths. Please use "
"another checkpoint, or use disable same length batching.")
def score(self, *args, references=[], candidates=[], batch_size=None):
"""Scores a collection of references and candidates.
Args:
*args: dummy collection of positional arguments.
references: a list of strings.
candidates: a list of strings.
batch_size: number of pairs to process per call to `predict_fn`. A high
value makes the eval speedier but also more memory-intensive.
Returns:
A list of scores.
"""
assert not args, (
"The score function does not accept positional arguments. Please "
"specify the name of the arguments explicitly, i.e., "
"`score(references=..., candidates=...`)")
candidates, references = list(candidates), list(references)
assert len(candidates) == len(references), (
"The number of candidate sentences must match the number of "
"reference sentences.")
n_items = len(candidates)
if not candidates:
return []
if not batch_size:
batch_size = DEFAULT_BLEURT_BATCH_SIZE
# Sorts the sentences by length.
input_ids, input_mask, segment_ids = encoding.encode_batch(
references, candidates, self.tokenizer, self.max_seq_length)
seq_lengths = np.sum(input_mask, axis=1)
sorted_indices = np.argsort(seq_lengths)
assert sorted_indices.shape[0] == n_items
all_results = np.repeat(self.DEFAULT_SCORE, n_items).astype(np.float64)
batch_lens = []
for i in range(0, n_items, batch_size):
# Gets the ids of the examples in the batch.
batch_indices = sorted_indices[i:i + batch_size]
# Computes the max sequence length.
      batch_lengths = seq_lengths[batch_indices]
      batch_max_len = max(max(batch_lengths), 1)
batch_lens.append(batch_max_len)
# Retrieves the examples and truncates the extra padding.
batch_input_ids = input_ids[batch_indices, :batch_max_len]
batch_input_mask = input_mask[batch_indices, :batch_max_len]
batch_segment_ids = segment_ids[batch_indices, :batch_max_len]
# Runs the inference.
tf_input = {
"input_ids": batch_input_ids,
"input_mask": batch_input_mask,
"segment_ids": batch_segment_ids
}
predict_out = self._predictor.predict(tf_input)
# Scatters the scores.
all_results[batch_indices] = predict_out
assert np.all(all_results > -1000), (
"Something went wrong while running the dynamic batching scorer.")
all_results = list(all_results)
assert len(all_results) == n_items, (
"Number of predictions does not match sentences: {} vs. {}".format(
len(all_results), len(candidates)))
logging.info("Average batch sequence length: {}".format(
np.mean(batch_lens)))
return all_results
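# Sketch of the batching idea above (an illustrative addition, not part of
# the original module): sorting examples by sequence length lets each batch
# be truncated to its own maximum length rather than the global
# max_seq_length, which cuts padding and speeds up inference on short inputs.
def _demo_length_batching(input_mask, batch_size):
  """Illustrative only; yields (batch_indices, batch_max_len) pairs."""
  seq_lengths = np.sum(input_mask, axis=1)
  sorted_indices = np.argsort(seq_lengths)
  for i in range(0, len(sorted_indices), batch_size):
    batch_indices = sorted_indices[i:i + batch_size]
    yield batch_indices, max(int(seq_lengths[batch_indices].max()), 1)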
class SavedModelBleurtScorer:
"""BLEURT class with in-graph string pre-processing."""
def __init__(self, saved_model=None, serialize_input=True):
"""Initializes BLEURT from a SavedModel."""
assert saved_model
self.saved_model_path = saved_model
self.serialize_input = serialize_input
logging.info("Reading checkpoint {}.".format(saved_model))
imported_model = tf.saved_model.load(saved_model)
self.bleurt_model_ops = imported_model.signatures["serving_default"]
logging.info("BLEURT initialized.")
def score(self, *args, references=[], candidates=[], batch_size=None):
"""Scores a collection of references and candidates.
Args:
*args: dummy collection of positional arguments.
references: a list of strings.
candidates: a list of strings.
batch_size: number of pairs to process per call to `predict_fn`. A high
value makes the eval speedier but also more memory-intensive.
Returns:
A list of scores.
"""
assert not args, (
"The score function does not accept positional arguments. Please "
"specify the name of the arguments explicitly, i.e., "
"`score(references=..., candidates=...`)")
candidates, references = list(candidates), list(references)
assert len(candidates) == len(references), (
"The number of candidate sentences must match the number of "
"reference sentences.")
if not candidates:
return []
if not batch_size:
batch_size = DEFAULT_BLEURT_BATCH_SIZE
all_results = []
for i in range(0, len(candidates), batch_size):
batch_ref = references[i:i + batch_size]
batch_cand = candidates[i:i + batch_size]
if self.serialize_input:
tfrecords = [
encoding.serialize_raw_example(r, c)
for (r, c) in zip(batch_ref, batch_cand)
]
predict_out = self.bleurt_model_ops(
examples=tf.constant(tfrecords))["predictions"].numpy()
else:
predict_out = self.bleurt_model_ops(
references=tf.constant(batch_ref),
candidates=tf.constant(batch_cand))["predictions"].numpy()
batch_results = predict_out.tolist()
all_results.extend(batch_results)
assert len(all_results) == len(candidates), (
"Number of predictions does not match sentences: {} vs. {}".format(
len(all_results), len(candidates)))
return all_results
def close(self):
pass
# TensorFlow API for BLEURT.
def create_bleurt_preprocessing_ops(tokenizer, max_seq_length):
"""Wraps TF ops for BLEURT preprocessing.
Args:
tokenizer: instance of lib.tokenizers.Tokenizer.
max_seq_length: BERT's max sequence length.
Returns:
A function that builds TF ops for BLEURT preprocessing.
"""
def _py_encode(references, candidates):
input_ids, input_mask, segment_ids = encoding.encode_batch(
references, candidates, tokenizer, max_seq_length)
return input_ids, input_mask, segment_ids
def bleurt_preprocessing_ops(references, candidates):
"""Builds a computation graph for BLEURT tokenization and encoding."""
return tf.numpy_function(
func=_py_encode,
inp=[references, candidates],
Tout=(tf.int64, tf.int64, tf.int64))
return bleurt_preprocessing_ops
def create_bleurt_ops(checkpoint=None, bleurt_model_fn=None):
"""Wraps a TF ops builder for BLEURT.
Args:
checkpoint: BLEURT checkpoint.
bleurt_model_fn: custom BLEURT model ops, overrides chkpt_dir. Used for
testing.
Returns:
A function that builds TF ops for BLEURT.
"""
if not checkpoint:
logging.info("No checkpoint specified, defaulting to BLEURT-tiny.")
checkpoint = _get_default_checkpoint()
assert bleurt_model_fn or tf.io.gfile.exists(checkpoint), (
"invalid path '%s'" % checkpoint)
def bleurt_ops(*args, references=None, candidates=None):
"""Builds computation graph for BLEURT.
Args:
*args: dummy positional arguments.
references: <tf.string>[...] Tensor that contains reference sentences.
candidates: <tf.string>[...] Tensor that contains candidate sentences.
Returns:
A <tf.float>[...] Tensor that contains BLEURT scores.
"""
logging.info("Creating BLEURT TF Ops...")
assert not args, (
"This function does not accept positional arguments. Please "
"specify the name of the arguments explicitly, i.e., "
"`bleurt_ops(references=..., candidates=...`).")
assert references is not None and candidates is not None
logging.info("Reading info from checkpoint {}".format(checkpoint))
config = checkpoint_lib.read_bleurt_config(checkpoint)
max_seq_length = config["max_seq_length"]
vocab_file = config["vocab_file"]
do_lower_case = config["do_lower_case"]
sp_model = config["sp_model"]
logging.info("Creating tokenizer...")
tokenizer = tokenizers.create_tokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case, sp_model=sp_model)
logging.info("Tokenizer created")
logging.info("Creating BLEURT Preprocessing Ops...")
bleurt_preprocessing_ops = create_bleurt_preprocessing_ops(
tokenizer, max_seq_length)
logging.info("Preprocessing Ops created.")
logging.info("Loading checkpoint...")
if not bleurt_model_fn:
imported = tf.saved_model.load(checkpoint)
bleurt_model_ops = imported.signatures["serving_default"]
else:
bleurt_model_ops = bleurt_model_fn
logging.info("BLEURT Checkpoint loaded")
input_ids, input_mask, segment_ids = bleurt_preprocessing_ops(
references, candidates)
out = bleurt_model_ops(
input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
logging.info("BLEURT TF Ops created.")
return out
return bleurt_ops
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for finetuning function."""
import os
import tempfile
from bleurt import checkpoint as checkpoint_lib
from bleurt import finetune
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
# Utils to get paths to static files.
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
def get_test_data():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
train_file = os.path.join(pkg, "test_data", "ratings_train.jsonl")
dev_file = os.path.join(pkg, "test_data", "ratings_dev.jsonl")
assert tf.io.gfile.exists(train_file)
assert tf.io.gfile.exists(dev_file)
return train_file, dev_file
class FinetuneTest(tf.test.TestCase):
def setUp(self):
# Saves default FLAG values.
super(FinetuneTest, self).setUp()
self._old_flags_val = (FLAGS.init_bleurt_checkpoint, FLAGS.model_dir,
FLAGS.num_train_steps, FLAGS.serialized_train_set,
FLAGS.serialized_dev_set, FLAGS.init_checkpoint,
FLAGS.bert_config_file, FLAGS.vocab_file,
FLAGS.max_seq_length, FLAGS.do_lower_case)
def tearDown(self):
# Restores default FLAG values.
(FLAGS.init_bleurt_checkpoint, FLAGS.model_dir, FLAGS.num_train_steps,
FLAGS.serialized_train_set, FLAGS.serialized_dev_set,
FLAGS.init_checkpoint, FLAGS.bert_config_file, FLAGS.vocab_file,
FLAGS.max_seq_length, FLAGS.do_lower_case) = self._old_flags_val
super(FinetuneTest, self).tearDown()
def test_finetune_from_bleurt(self):
checkpoint = get_test_checkpoint()
train_file, dev_file = get_test_data()
with tempfile.TemporaryDirectory() as model_dir:
# Sets new flags.
FLAGS.init_bleurt_checkpoint = checkpoint
FLAGS.model_dir = model_dir
FLAGS.num_train_steps = 1
FLAGS.serialized_train_set = os.path.join(model_dir, "train.tfrecord")
FLAGS.serialized_dev_set = os.path.join(model_dir, "dev.tfrecord")
# Runs 1 training step.
export = finetune.run_finetuning_pipeline(train_file, dev_file)
# Checks if the pipeline produced a valid BLEURT checkpoint.
self.assertTrue(tf.io.gfile.exists(export))
config = checkpoint_lib.read_bleurt_config(export)
      self.assertIsInstance(config, dict)
def test_finetune_change_input_len(self):
checkpoint = get_test_checkpoint()
train_file, dev_file = get_test_data()
with tempfile.TemporaryDirectory() as model_dir:
# Sets new flags.
FLAGS.init_bleurt_checkpoint = checkpoint
FLAGS.model_dir = model_dir
FLAGS.num_train_steps = 1
FLAGS.max_seq_length = 512
FLAGS.serialized_train_set = os.path.join(model_dir, "train.tfrecord")
FLAGS.serialized_dev_set = os.path.join(model_dir, "dev.tfrecord")
# Runs 1 training step.
export = finetune.run_finetuning_pipeline(train_file, dev_file)
# Checks if the pipeline produced a valid BLEURT checkpoint.
self.assertTrue(tf.io.gfile.exists(export))
config = checkpoint_lib.read_bleurt_config(export)
      self.assertIsInstance(config, dict)
def test_finetune_from_bert(self):
checkpoint = get_test_checkpoint()
train_file, dev_file = get_test_data()
with tempfile.TemporaryDirectory() as model_dir:
# Sets new flags.
FLAGS.model_dir = model_dir
FLAGS.init_checkpoint = os.path.join(checkpoint, "variables", "variables")
FLAGS.bert_config_file = os.path.join(checkpoint, "bert_config.json")
FLAGS.vocab_file = os.path.join(checkpoint, "vocab.txt")
FLAGS.do_lower_case = True
FLAGS.max_seq_length = 512
FLAGS.num_train_steps = 1
FLAGS.serialized_train_set = os.path.join(model_dir, "train.tfrecord")
FLAGS.serialized_dev_set = os.path.join(model_dir, "dev.tfrecord")
# Runs 1 training step.
export = finetune.run_finetuning_pipeline(train_file, dev_file)
# Checks if the pipeline produced a valid BLEURT checkpoint.
self.assertTrue(tf.io.gfile.exists(export))
config = checkpoint_lib.read_bleurt_config(export)
      self.assertIsInstance(config, dict)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads WMT data, runs BLEURT, and compute correlation with human ratings."""
import os
from bleurt import finetune
from bleurt.wmt import db_builder
from bleurt.wmt import evaluator
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
app = tf.app
FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist(
"train_years", "2015 2016",
"Years used for train and dev. The flags `dev_ratio` and `prevent_leaks`"
" specify how to split the data.")
flags.DEFINE_spaceseplist("test_years", "2017", "Years used for test.")
flags.DEFINE_string(
"data_dir", "/tmp/wmt_data",
"Directory where the train, dev, and test sets will be "
"stored.")
flags.DEFINE_bool(
"average_duplicates_on_test", True,
"Whether all the ratings for the same translation should be averaged "
"(should be set to False to replicate the results of WMT 18 and 19).")
def run_benchmark():
"""Runs the WMT Metrics Benchmark end-to-end."""
logging.info("Running WMT Metrics Shared Task Benchmark")
# Prepares the datasets.
if not tf.io.gfile.exists(FLAGS.data_dir):
logging.info("Creating directory {}".format(FLAGS.data_dir))
tf.io.gfile.mkdir(FLAGS.data_dir)
train_ratings_file = os.path.join(FLAGS.data_dir, "train_ratings.json")
dev_ratings_file = os.path.join(FLAGS.data_dir, "dev_ratings.json")
test_ratings_file = os.path.join(FLAGS.data_dir, "test_ratings.json")
for f in [train_ratings_file, dev_ratings_file, test_ratings_file]:
if tf.io.gfile.exists(f):
logging.info("Deleting existing file: {}".format(f))
tf.io.gfile.remove(f)
print("Done.")
logging.info("\n*** Creating training data. ***")
db_builder.create_wmt_dataset(train_ratings_file, FLAGS.train_years,
FLAGS.target_language)
db_builder.postprocess(train_ratings_file)
db_builder.shuffle_split(
train_ratings_file,
train_ratings_file,
dev_ratings_file,
dev_ratio=FLAGS.dev_ratio,
prevent_leaks=FLAGS.prevent_leaks)
logging.info("\n*** Creating test data. ***")
db_builder.create_wmt_dataset(test_ratings_file, FLAGS.test_years,
FLAGS.target_language)
db_builder.postprocess(
test_ratings_file, average_duplicates=FLAGS.average_duplicates_on_test)
# Trains BLEURT.
logging.info("\n*** Training BLEURT. ***")
export_dir = finetune.run_finetuning_pipeline(train_ratings_file,
dev_ratings_file)
# Runs the eval.
logging.info("\n*** Testing BLEURT. ***")
if not FLAGS.results_json:
results_json = os.path.join(FLAGS.data_dir, "results.json")
else:
results_json = FLAGS.results_json
results = evaluator.eval_checkpoint(export_dir, test_ratings_file,
results_json)
logging.info(results)
logging.info("\n*** Done. ***")
def main(_):
run_benchmark()
if __name__ == "__main__":
tf.app.run(main)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Library to build download and aggregate WMT ratings data.
More info about the datasets: https://www.statmt.org/wmt19/metrics-task.html
"""
import os
import tempfile
from bleurt.wmt import downloaders
import pandas as pd
import tensorflow.compat.v1 as tf
flags = tf.flags
app = tf.app
logging = tf.logging
FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist(
"rating_years", "2015 2016",
"Years for which the ratings will be downloaded,"
" between 2015 and 2019. Ex: \"2015 2016 2017\".")
flags.DEFINE_string(
"target_language", "en",
"Two letter code for the target language. Ex: \"en\"."
"A star `*` means all languages.")
flags.DEFINE_string("target_file", "/tmp/wmt_eval.jsonl",
"Path to JSONL ratings file to be created.")
flags.DEFINE_bool(
"average_duplicates", True,
"Whether all the ratings for the same translation should be averaged "
"(set to False for the test sets of WMT'18 and '19).")
flags.DEFINE_string(
"wmt_ratings_file", None,
"[optional] Path where raw WMT ratings will be stored. Creates a temp "
"file if None.")
flags.DEFINE_string(
"temp_directory", None, "[optional] Temporary directory where WMT archives "
"will be downladed and untared.")
# Post-processing.
flags.DEFINE_float("dev_ratio", None,
"Ratio of data allocated to dev set. No split if None")
flags.DEFINE_bool(
"prevent_leaks", True,
"Prevent leaks when splitting, i.e., train and dev examples with the same "
"reference sentence.")
WMT_IMPORTERS = {
"2015": downloaders.Importer1516,
"2016": downloaders.Importer1516,
"2017": downloaders.Importer17,
"2018": downloaders.Importer18,
"2019": downloaders.Importer19
}
def create_wmt_dataset(target_file, rating_years, target_language):
"""Creates a JSONL file for a given set of years and a target language."""
logging.info("*** Downloading ratings data from WMT.")
assert target_file
  assert not os.path.exists(target_file), \
      "Target file already exists. Aborting."
assert rating_years, "No target year detected."
for year in rating_years:
assert year in WMT_IMPORTERS, "No importer for year {}.".format(year)
assert target_language
assert target_language == "*" or len(target_language) == 2, \
"target_language must be a two-letter language code or `*`."
with tempfile.TemporaryDirectory(dir=FLAGS.temp_directory) as tmpdir:
logging.info("Using tmp directory: {}".format(tmpdir))
n_records_total = 0
for year in rating_years:
logging.info("\nProcessing ratings for year {}".format(year))
tmp_file = os.path.join(tmpdir, "tmp_ratings.json")
# Builds an importer.
importer_class = WMT_IMPORTERS[year]
importer = importer_class(year, tmpdir, tmp_file)
importer.fetch_files()
lang_pairs = importer.list_lang_pairs()
logging.info("Lang pairs found:")
logging.info(" ".join(lang_pairs))
for lang_pair in lang_pairs:
if target_language != "*" and not lang_pair.endswith(target_language):
logging.info("Skipping language pair {}".format(lang_pair))
continue
logging.info("Generating records for {} and language pair {}".format(
year, lang_pair))
n_records = importer.generate_records_for_lang(lang_pair)
logging.info("Imported {} records.".format(str(n_records)))
n_records_total += n_records
logging.info("Done processing {} elements".format(n_records_total))
logging.info("Copying temp file...")
tf.io.gfile.copy(tmp_file, target_file, overwrite=True)
logging.info("Done.")
def postprocess(target_file, remove_null_refs=True, average_duplicates=True):
"""Postprocesses a JSONL file of ratings downloaded from WMT."""
logging.info("\n*** Post-processing WMT ratings {}.".format(target_file))
assert tf.io.gfile.exists(target_file), "WMT ratings file not found!"
base_file = target_file + "_raw"
tf.io.gfile.rename(target_file, base_file, overwrite=True)
logging.info("Reading and processing wmt data...")
with tf.io.gfile.GFile(base_file, "r") as f:
ratings_df = pd.read_json(f, lines=True)
# ratings_df = ratings_df[["lang", "reference", "candidate", "rating"]]
ratings_df.rename(columns={"rating": "score"}, inplace=True)
if remove_null_refs:
ratings_df = ratings_df[ratings_df["reference"].notnull()]
assert not ratings_df.empty
if average_duplicates:
    logging.warning(
        "*** WARNING! Averaging duplicates will collapse system names for "
        "the same candidate and reference pair, leaving only the first "
        "system name in the resulting file. This behavior is "
        "non-deterministic and may result in inaccurate results.")
ratings_df = ratings_df.groupby(by=["lang", "candidate", "reference"]).agg({
"source": "first",
"year": "first",
"n_ratings": "first",
"system": "first",
"segment_id": "first",
"raw_rating": "mean",
"score": "mean",
}).reset_index()
ratings_df = ratings_df.sort_values(["lang", "reference", "candidate"])
logging.info("Saving clean file.")
with tf.io.gfile.GFile(target_file, "w+") as f:
ratings_df.to_json(f, orient="records", lines=True)
logging.info("Cleaning up old ratings file.")
tf.io.gfile.remove(base_file)
def _shuffle_no_leak(all_ratings_df, n_train):
"""Splits and shuffles such that there is no train/dev example with the same ref."""
def is_split_leaky(ix):
return (
all_ratings_df.iloc[ix].reference == all_ratings_df.iloc[ix -
1].reference)
assert 0 < n_train < len(all_ratings_df.index)
# Clusters the examples by reference sentence.
sentences = all_ratings_df.reference.sample(frac=1, random_state=555).unique()
sentence_to_ix = {s: i for i, s in enumerate(sentences)}
all_ratings_df["__sentence_ix__"] = [
sentence_to_ix[s] for s in all_ratings_df.reference
]
all_ratings_df = all_ratings_df.sort_values(by="__sentence_ix__")
all_ratings_df.drop(columns=["__sentence_ix__"], inplace=True)
# Moves the split point until there is no leakage.
split_ix = n_train
n_dev_sentences = len(all_ratings_df.iloc[split_ix:].reference.unique())
if n_dev_sentences == 1 and is_split_leaky(split_ix):
raise ValueError("Failed splitting data--not enough distinct dev sentences"
" to prevent leak.")
while is_split_leaky(split_ix):
split_ix += 1
if n_train != split_ix:
logging.info("Moved split point from {} to {} to prevent "
"sentence leaking".format(n_train, split_ix))
# Shuffles the train and dev sets separately.
train_ratings_df = all_ratings_df.iloc[:split_ix].copy()
train_ratings_df = train_ratings_df.sample(frac=1, random_state=555)
dev_ratings_df = all_ratings_df.iloc[split_ix:].copy()
dev_ratings_df = dev_ratings_df.sample(frac=1, random_state=555)
assert len(train_ratings_df) + len(dev_ratings_df) == len(all_ratings_df)
# Checks that there is no leakage.
train_sentences = train_ratings_df.reference.unique()
dev_sentences = dev_ratings_df.reference.unique()
  logging.info("Using {} and {} unique sentences for train and dev.".format(
      len(train_sentences), len(dev_sentences)))
assert not bool(set(train_sentences) & set(dev_sentences))
return train_ratings_df, dev_ratings_df
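# Toy example (an illustrative addition, not part of the original module):
# after clustering, rows sharing a reference sit next to each other, so the
# split point only ever moves forward past a cluster it would otherwise cut
# in two, and no reference sentence ends up on both sides of the split.
def _demo_shuffle_no_leak():
  """Illustrative only."""
  toy_df = pd.DataFrame({
      "reference": ["A", "A", "B", "B", "C"],
      "candidate": ["a1", "a2", "b1", "b2", "c1"],
  })
  train_df, dev_df = _shuffle_no_leak(toy_df, n_train=3)
  assert not set(train_df.reference) & set(dev_df.reference)
  return train_df, dev_df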
def _shuffle_leaky(all_ratings_df, n_train):
"""Shuffles and splits the ratings allowing overlap in the ref sentences."""
all_ratings_df = all_ratings_df.sample(frac=1, random_state=555)
all_ratings_df = all_ratings_df.reset_index(drop=True)
train_ratings_df = all_ratings_df.iloc[:n_train].copy()
dev_ratings_df = all_ratings_df.iloc[n_train:].copy()
assert len(train_ratings_df) + len(dev_ratings_df) == len(all_ratings_df)
return train_ratings_df, dev_ratings_df
def shuffle_split(ratings_file,
train_file=None,
dev_file=None,
dev_ratio=.1,
prevent_leaks=True):
"""Splits a JSONL WMT ratings file into train/dev."""
logging.info("\n*** Splitting WMT data in train/dev.")
assert tf.io.gfile.exists(ratings_file), "WMT ratings file not found!"
base_file = ratings_file + "_raw"
tf.io.gfile.rename(ratings_file, base_file, overwrite=True)
logging.info("Reading wmt data...")
with tf.io.gfile.GFile(base_file, "r") as f:
ratings_df = pd.read_json(f, lines=True)
logging.info("Doing the shuffle / split.")
n_rows, n_train = len(ratings_df), int((1 - dev_ratio) * len(ratings_df))
logging.info("Will attempt to set aside {} out of {} rows for dev.".format(
n_rows - n_train, n_rows))
if prevent_leaks:
train_df, dev_df = _shuffle_no_leak(ratings_df, n_train)
else:
train_df, dev_df = _shuffle_leaky(ratings_df, n_train)
logging.info("Created train and dev files with {} and {} records.".format(
len(train_df), len(dev_df)))
logging.info("Saving clean file.")
if not train_file:
train_file = ratings_file + "_train"
with tf.io.gfile.GFile(train_file, "w+") as f:
train_df.to_json(f, orient="records", lines=True)
if not dev_file:
dev_file = ratings_file + "_dev"
with tf.io.gfile.GFile(dev_file, "w+") as f:
dev_df.to_json(f, orient="records", lines=True)
logging.info("Cleaning up old ratings file.")
tf.io.gfile.remove(base_file)
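# Illustrative usage sketch (the path below is hypothetical): the call
#
#   shuffle_split("/tmp/ratings.jsonl", dev_ratio=0.1, prevent_leaks=True)
#
# renames the input to "/tmp/ratings.jsonl_raw", then writes
# "/tmp/ratings.jsonl_train" and "/tmp/ratings.jsonl_dev" with roughly a
# 90/10 split and, since prevent_leaks is True, no reference sentence
# shared between the two output files.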
def main(_):
create_wmt_dataset(FLAGS.target_file, FLAGS.rating_years,
FLAGS.target_language)
postprocess(FLAGS.target_file, average_duplicates=FLAGS.average_duplicates)
if FLAGS.dev_ratio:
shuffle_split(
FLAGS.target_file,
dev_ratio=FLAGS.dev_ratio,
prevent_leaks=FLAGS.prevent_leaks)
if __name__ == "__main__":
app.run(main)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads ratings data from the WMT Metrics shared task.
More info about the datasets: https://www.statmt.org/wmt19/metrics-task.html
"""
import abc
import collections
import glob
import gzip
import itertools
import json
import os
import re
import shutil
import tarfile
import numpy as np
import six
import tensorflow.compat.v1 as tf
logging = tf.logging
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"wmt_cache_dir", None,
"[optional] Directory where downloads from the WMT Metrics Shared task"
" websites will be cached.")
WMT_LOCATIONS = {
2015: {
"eval_data": ("DAseg-wmt-newstest2015", "DAseg-wmt-newstest2015.tar.gz",
"https://www.scss.tcd.ie/~ygraham/")
},
2016: {
"eval_data": ("DAseg-wmt-newstest2016", "DAseg-wmt-newstest2016.tar.gz",
"https://www.scss.tcd.ie/~ygraham/")
},
2017: {
"submissions":
("wmt17-metrics-task-no-hybrids", "wmt17-metrics-task-package.tgz",
"http://ufallab.ms.mff.cuni.cz/~bojar/"),
"eval_data": ("newstest2017-segment-level-human",
"newstest2017-segment-level-human.tar.gz",
"https://www.scss.tcd.ie/~ygraham/")
},
2018: {
"submissions":
("wmt18-metrics-task-nohybrids", "wmt18-metrics-task-nohybrids.tgz",
"http://ufallab.ms.mff.cuni.cz/~bojar/wmt18/"),
"eval_data": ("newstest2018-humaneval", "newstest2018-humaneval.tar.gz",
"https://www.scss.tcd.ie/~ygraham/")
},
2019: {
"submissions": ("wmt19-submitted-data-v3",
"wmt19-submitted-data-v3-txt-minimal.tgz",
"http://ufallab.ms.mff.cuni.cz/~bojar/wmt19/"),
"eval_data": ("newstest2019-humaneval", "newstest2019-humaneval.tar.gz",
"https://www.scss.tcd.ie/~ygraham/")
}
}
def separate_lang_pair(lang_pair):
lang_expr = re.compile("([a-z]{2})-([a-z]{2})")
match = lang_expr.match(lang_pair)
if match:
return match.group(1), match.group(2)
else:
return None
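# Example: separate_lang_pair("de-en") returns ("de", "en"); strings that do
# not match the xx-yy pattern, e.g. "deen", return None.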
def postprocess_segment(segment):
"""Various string post-processing necessary to clean the records."""
# Identifies NULL values.
if segment == "NO REFERENCE AVAILABLE\n":
return None
  # Strips surrounding whitespace, including trailing \n's.
segment = segment.strip()
return segment
@six.add_metaclass(abc.ABCMeta)
class WMTImporter(object):
"""Base class for WMT Importers.
The aim of WMT importers is to fetch datafiles from the various WMT sources,
  collect information (e.g., the list of language pairs) and aggregate them into
one big file.
"""
def __init__(self, str_year, temp_directory, target_file):
year = int(str_year[:4]) # Extracts year from the Importer string name.
self.year = year
self.location_info = WMT_LOCATIONS[year]
self.temp_directory = temp_directory
self.target_file = target_file
def fetch_files(self):
"""Downloads raw datafiles from various WMT sources."""
cache = FLAGS.wmt_cache_dir
if cache and not tf.io.gfile.exists(cache):
logging.info("Initializing cache {}".format(cache))
tf.io.gfile.makedirs(cache)
for file_type in self.location_info:
folder_name, archive_name, url_prefix = self.location_info[file_type]
url = url_prefix + archive_name
if cache:
cache_path = os.path.join(cache, archive_name)
download_path = os.path.join(self.temp_directory, archive_name)
logging.info("Checking cache {}.".format(cache_path))
if tf.io.gfile.exists(cache_path):
logging.info("Cache found, copying..")
tf.io.gfile.copy(cache_path, download_path, overwrite=True)
logging.info("Done. Untaring...")
tar = tarfile.open(download_path)
tar.extractall(path=self.temp_directory)
tar.close()
logging.info("Done.")
continue
logging.info("File not found in cache.")
logging.info("Downloading {} from {}".format(folder_name, url))
_ = tf.keras.utils.get_file(
archive_name, url, cache_subdir=self.temp_directory, extract=True)
logging.info("Done")
if cache:
logging.info("Writing to cache {}.".format(cache_path))
tf.io.gfile.copy(download_path, cache_path, overwrite=True)
logging.info("Done.")
def list_lang_pairs(self):
"""List all language pairs included in the WMT files for the target year."""
pass
def generate_records_for_lang(self, lang):
"""Consolidates all the files for a given language pair and year."""
pass
def cleanup(self):
"""Housekeeping--we want to erase all the temp files created."""
for file_type in self.location_info:
folder_name, archive_name, _ = self.location_info[file_type]
# Removes data folder
folder_path = os.path.join(self.temp_directory, folder_name)
logging.info("Removing", folder_path)
try:
shutil.rmtree(folder_path)
except OSError:
logging.info("OS Error--skipping")
# Removes downloaded archive
archive_path = os.path.join(self.temp_directory, archive_name)
logging.info("Removing", archive_path)
try:
os.remove(archive_path)
except OSError:
logging.info("OS Error--skipping")
class Importer1516(WMTImporter):
"""Importer for years 2015 and 2016."""
@staticmethod
def to_json(year, lang, source, reference, candidate, rating, seg_id, system):
"""Converts record to JSON."""
json_dict = {
"year": int(year),
"lang": lang,
"source": postprocess_segment(source),
"reference": postprocess_segment(reference),
"candidate": postprocess_segment(candidate),
"raw_rating": None,
"rating": float(rating.strip()),
"segment_id": seg_id,
"system": system,
"n_ratings": None
}
return json.dumps(json_dict)
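  # Illustrative output of to_json with made-up segments (trailing newlines
  # are stripped by postprocess_segment, the rating string is parsed as a
  # float, and missing fields serialize to null). The call
  # Importer1516.to_json(2015, "de-en", "src\n", "ref\n", "cand\n", "0.5\n",
  # 1, None) serializes to (reflowed here for readability):
  #
  #   '{"year": 2015, "lang": "de-en", "source": "src", "reference": "ref",
  #     "candidate": "cand", "raw_rating": null, "rating": 0.5,
  #     "segment_id": 1, "system": null, "n_ratings": null}'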
@staticmethod
def parse_file_name(fname):
wmt_pattern = re.compile(r"^DAseg\.newstest([0-9]+)\.[a-z\-]+\.([a-z\-]+)")
match = re.match(wmt_pattern, fname)
if match:
year, lang_pair = int(match.group(1)), match.group(2)
return year, lang_pair
else:
return None, None
def get_full_folder_path(self):
"""Returns path of directory with all the extracted files."""
file_type = "eval_data"
folder_name, _, _ = self.location_info[file_type]
folder = os.path.join(self.temp_directory, folder_name)
return folder
def list_files_for_lang(self, lang):
"""Lists the full paths of all the files for a given language pair."""
year = self.year
source_file = "DAseg.newstest{}.source.{}".format(str(year), lang)
reference_file = "DAseg.newstest{}.reference.{}".format(str(year), lang)
candidate_file = "DAseg.newstest{}.mt-system.{}".format(str(year), lang)
rating_file = "DAseg.newstest{}.human.{}".format(str(year), lang)
folder = self.get_full_folder_path()
return {
"source": os.path.join(folder, source_file),
"reference": os.path.join(folder, reference_file),
"candidate": os.path.join(folder, candidate_file),
"rating": os.path.join(folder, rating_file)
}
def list_lang_pairs(self):
folder = self.get_full_folder_path()
file_names = os.listdir(folder)
file_data = [Importer1516.parse_file_name(f) for f in file_names]
lang_pairs = [lang_pair for year, lang_pair in file_data \
if year and lang_pair]
return list(set(lang_pairs))
def generate_records_for_lang(self, lang):
year = self.year
input_files = self.list_files_for_lang(lang)
# pylint: disable=g-backslash-continuation
with open(input_files["source"], "r", encoding="utf-8") as source_file, \
open(input_files["reference"], "r", encoding="utf-8") as reference_file, \
open(input_files["candidate"], "r", encoding="utf-8") as candidate_file, \
open(input_files["rating"], "r", encoding="utf-8") as rating_file:
# pylint: enable=g-backslash-continuation
n_records = 0
with open(self.target_file, "a+") as dest_file:
for source, reference, candidate, rating in itertools.zip_longest(
source_file, reference_file, candidate_file, rating_file):
example = Importer1516.to_json(year, lang, source, reference,
candidate, rating, n_records + 1, None)
dest_file.write(example)
dest_file.write("\n")
n_records += 1
logging.info("Processed {} records".format(str(n_records)))
return n_records
class Importer17(WMTImporter):
"""Importer for year 2017."""
def __init__(self, *args, **kwargs):
super(Importer17, self).__init__(*args, **kwargs)
self.lang_pairs = None
def get_folder_path(self):
"""Returns path of directory with all the extracted files."""
return self.temp_directory
def agg_ratings_path(self):
return os.path.join(self.temp_directory, "manual-evaluation",
"DA-seglevel.csv")
def segments_path(self, subset="root"):
"""Return the path to the source, reference, candidate, and raw rating segments.
Args:
      subset: one of "root", "source", "reference", "candidate", or
"raw_rating".
Returns:
Path to the relevant folder.
"""
assert subset in ["root", "source", "reference", "candidate", "raw_rating"]
root_dir = os.path.join(self.temp_directory, "extracted_wmt_package")
if subset == "root":
return root_dir
root_dir = os.path.join(root_dir, "wmt17-metrics-task-no-hybrids")
if subset == "source":
return os.path.join(root_dir, "wmt17-submitted-data", "txt", "sources")
elif subset == "reference":
return os.path.join(root_dir, "wmt17-submitted-data", "txt", "references")
elif subset == "candidate":
return os.path.join(root_dir, "wmt17-submitted-data", "txt",
"system-outputs", "newstest2017")
elif subset == "raw_rating":
return os.path.join(root_dir, "newstest2017-segment-level-human")
def fetch_files(self):
"""Downloads the WMT eval files."""
# Downloads the main archive.
super(Importer17, self).fetch_files()
# Unpacks the segments.
package_path = self.get_folder_path()
segments_archive = os.path.join(package_path, "input",
"wmt17-metrics-task-no-hybrids.tgz")
    with tarfile.open(segments_archive, "r:gz") as tar:
tar.extractall(path=self.segments_path())
logging.info("Unpacked the segments to {}.".format(self.segments_path()))
# Gets the language pair names.
ratings_path = self.agg_ratings_path()
lang_pairs = set()
with open(ratings_path, "r") as ratings_file:
for l in itertools.islice(ratings_file, 1, None):
lang = l.split(" ")[0]
assert re.match("[a-z][a-z]-[a-z][a-z]", lang)
lang_pairs.add(lang)
self.lang_pairs = list(lang_pairs)
logging.info("Done")
def list_lang_pairs(self):
"""List all language pairs included in the WMT files for the target year."""
assert self.lang_pairs
return self.lang_pairs
def get_ref_segments(self, lang):
"""Fetches source and reference translation segments for language pair."""
src_subfolder = self.segments_path("source")
ref_subfolder = self.segments_path("reference")
src_lang, tgt_lang = separate_lang_pair(lang)
src_file = "newstest2017-{src}{tgt}-src.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=src_lang)
ref_file = "newstest2017-{src}{tgt}-ref.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=tgt_lang)
src_path = os.path.join(src_subfolder, src_file)
ref_path = os.path.join(ref_subfolder, ref_file)
logging.info("Reading data from files {} and {}".format(src_path, ref_path))
with open(src_path, "r", encoding="utf-8") as f_src:
src_segments = f_src.readlines()
with open(ref_path, "r", encoding="utf-8") as f_ref:
ref_segments = f_ref.readlines()
src_segments = [postprocess_segment(s) for s in src_segments]
ref_segments = [postprocess_segment(s) for s in ref_segments]
logging.info("Read {} source and {} reference segments.".format(
len(src_segments), len(ref_segments)))
return src_segments, ref_segments
@staticmethod
def parse_submission_file_name(fname):
"""Extracts system names from the name of submission files."""
wmt_pattern = re.compile(
r"^newstest2017\.([a-zA-Z0-9\-\.]+\.[0-9]+)\.[a-z]{2}-[a-z]{2}")
match = re.match(wmt_pattern, fname)
if match:
return match.group(1)
else:
return None
def get_sys_segments(self, lang):
"""Builds a dictionary with the generated segments for each system."""
# Gets all submission file paths.
root_folder = self.segments_path("candidate")
folder = os.path.join(root_folder, lang)
all_files = os.listdir(folder)
logging.info("Reading submission files from {}".format(folder))
# Extracts the generated segments for each submission.
sys_segments = {}
for sys_file_name in all_files:
sys_name = Importer17.parse_submission_file_name(sys_file_name)
assert sys_name
sys_path = os.path.join(folder, sys_file_name)
with open(sys_path, "r", encoding="utf-8") as f_sys:
sys_lines = f_sys.readlines()
sys_lines = [postprocess_segment(s) for s in sys_lines]
sys_segments[sys_name] = sys_lines
logging.info("Read submissions from {} systems".format(
len(sys_segments.keys())))
return sys_segments
def get_raw_rating_scores(self, lang):
"""Builds a dictionary with the rating score for each segment."""
# Gets the raw ratings file path.
folder_name, _, _ = self.location_info["eval_data"]
raw_rating_path = os.path.join(self.temp_directory, folder_name,
"anon-proc-hits-seg-{}".format(lang[-2:]),
"analysis", "ad-seg-scores.csv.gz")
logging.info("Reading raw ratings from {}".format(raw_rating_path))
# Extracts the raw rating segments.
with gzip.open(raw_rating_path, "rt") as f_raw_ratings:
raw_rating_lines = f_raw_ratings.readlines()
# Each column in ratings file is separated by spaces.
raw_rating_lines = [
postprocess_segment(s).split() for s in raw_rating_lines
]
# Filter out ratings for other language pairs.
check_lang = lambda x: "-".join([x[0], x[1]]) == lang
raw_rating_lines = list(filter(check_lang, raw_rating_lines))
    # Builds a mapping from seg_id (column 5) to the raw ratings (column 7).
raw_ratings = collections.defaultdict(list)
for x in raw_rating_lines:
raw_ratings[int(x[5])].append(float(x[7]))
# If there are multiple ratings, the final rating is averaged.
for key, value in raw_ratings.items():
raw_ratings[key] = np.mean(value)
return raw_ratings
def parse_rating(self, line):
fields = line.split()
lang = fields[0]
sys_names = fields[2].split("+")
seg_id = int(fields[3])
z_score = float(fields[4])
for sys_name in sys_names:
yield lang, sys_name, seg_id, z_score
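  # Illustrative sketch of parse_rating on a made-up DA-seglevel.csv line;
  # the constructor arguments below are hypothetical. Each "+"-joined
  # system name yields its own tuple:
  #
  #   >>> importer = Importer17("2017", "/tmp/wmt", "/tmp/ratings.jsonl")
  #   >>> list(importer.parse_rating("de-en SRC sysA+sysB 3 0.123"))
  #   [('de-en', 'sysA', 3, 0.123), ('de-en', 'sysB', 3, 0.123)]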
def generate_records_for_lang(self, lang):
"""Consolidates all the files for a given language pair and year."""
# Loads source, reference, system segments, and raw ratings.
src_segments, ref_segments = self.get_ref_segments(lang)
sys_segments = self.get_sys_segments(lang)
raw_rating_scores = self.get_raw_rating_scores(lang)
# Streams the rating file and performs the join on-the-fly.
ratings_file_path = self.agg_ratings_path()
logging.info("Reading file {}".format(ratings_file_path))
n_records = 0
with open(ratings_file_path, "r", encoding="utf-8") as f_ratings:
with open(self.target_file, "a+") as dest_file:
for line in itertools.islice(f_ratings, 1, None):
for parsed_line in self.parse_rating(line):
line_lang, sys_name, seg_id, z_score = parsed_line
if line_lang != lang:
continue
# The "-1" is necessary because seg_id starts counting at 1.
src_segment = src_segments[seg_id - 1]
ref_segment = ref_segments[seg_id - 1]
sys_segment = sys_segments[sys_name][seg_id - 1]
            # Uses seg_id directly because it is a dict key here, not an index.
raw_rating_score = raw_rating_scores[seg_id]
example = Importer18.to_json(self.year, lang, src_segment,
ref_segment, sys_segment,
raw_rating_score, z_score, seg_id,
sys_name)
dest_file.write(example)
dest_file.write("\n")
n_records += 1
logging.info(
"Done reading ratings file. {} records written.".format(n_records))
return n_records
class Importer18(WMTImporter):
"""Importer for year 2018."""
def parse_submission_file_name(self, fname):
"""Extracts system names from the name of submission files."""
wmt_pattern = re.compile(
r"^newstest2018\.([a-zA-Z0-9\-\.]+\.[0-9]+)\.[a-z]{2}-[a-z]{2}")
match = re.match(wmt_pattern, fname)
if match:
return match.group(1)
else:
return None
def parse_eval_file_name(self, fname):
"""Extracts language pairs from the names of human rating files."""
wmt_pattern = re.compile(r"^ad-seg-scores-([a-z]{2}-[a-z]{2})\.csv")
match = re.match(wmt_pattern, fname)
if match:
return match.group(1)
else:
return None
def list_lang_pairs(self):
"""List all language pairs included in the WMT files for 2018."""
folder_name, _, _ = self.location_info["eval_data"]
subfolder = "analysis"
folder = os.path.join(self.temp_directory, folder_name, subfolder)
all_files = os.listdir(folder)
cand_lang_pairs = [self.parse_eval_file_name(fname) for fname in all_files]
    # Removes None values from cand_lang_pairs.
lang_pairs = [lang_pair for lang_pair in cand_lang_pairs if lang_pair]
return list(set(lang_pairs))
def get_ref_segments(self, lang):
"""Fetches source and reference translation segments for language pair."""
folder, _, _ = self.location_info["submissions"]
src_subfolder = os.path.join("sources")
ref_subfolder = os.path.join("references")
src_lang, tgt_lang = separate_lang_pair(lang)
src_file = "newstest2018-{src}{tgt}-src.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=src_lang)
ref_file = "newstest2018-{src}{tgt}-ref.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=tgt_lang)
src_path = os.path.join(self.temp_directory, folder, src_subfolder,
src_file)
ref_path = os.path.join(self.temp_directory, folder, ref_subfolder,
ref_file)
logging.info("Reading data from files {} and {}".format(src_path, ref_path))
with open(src_path, "r", encoding="utf-8") as f_src:
src_segments = f_src.readlines()
with open(ref_path, "r", encoding="utf-8") as f_ref:
ref_segments = f_ref.readlines()
src_segments = [postprocess_segment(s) for s in src_segments]
ref_segments = [postprocess_segment(s) for s in ref_segments]
return src_segments, ref_segments
def get_sys_segments(self, lang):
"""Builds a dictionary with the generated segments for each system."""
# Gets all submission file paths.
folder_name, _, _ = self.location_info["submissions"]
subfolder = os.path.join("system-outputs", "newstest2018")
folder = os.path.join(self.temp_directory, folder_name, subfolder, lang)
all_files = os.listdir(folder)
logging.info("Reading submission files from {}".format(folder))
# Extracts the generated segments for each submission.
sys_segments = {}
for sys_file_name in all_files:
sys_name = self.parse_submission_file_name(sys_file_name)
assert sys_name
sys_path = os.path.join(folder, sys_file_name)
with open(sys_path, "r", encoding="utf-8") as f_sys:
sys_lines = f_sys.readlines()
sys_lines = [postprocess_segment(s) for s in sys_lines]
sys_segments[sys_name] = sys_lines
return sys_segments
def get_ratings_path(self, lang):
folder, _, _ = self.location_info["eval_data"]
subfolder = "analysis"
file_name = "ad-seg-scores-{}.csv".format(lang)
return os.path.join(self.temp_directory, folder, subfolder, file_name)
def parse_rating(self, rating_line):
rating_tuple = tuple(rating_line.split(" "))
    # The last field is presumably the number of ratings, but this is not
    # documented in the WMT release.
sys_name, seg_id, raw_score, z_score, n_ratings = rating_tuple
seg_id = int(seg_id)
raw_score = float(raw_score)
z_score = float(z_score)
n_ratings = int(n_ratings)
return sys_name, seg_id, raw_score, z_score, n_ratings
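  # Illustrative sketch on a made-up ad-seg-scores line: the five
  # space-separated fields map to (sys_name, seg_id, raw_score, z_score,
  # n_ratings), e.g. "sysA.123 7 85.0 0.42 2" parses to
  # ('sysA.123', 7, 85.0, 0.42, 2).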
@staticmethod
def to_json(year,
lang,
src_segment,
ref_segment,
sys_segment,
raw_score,
z_score,
seg_id,
sys_name,
n_ratings=0):
"""Converts record to JSON."""
json_dict = {
"year": year,
"lang": lang,
"source": src_segment,
"reference": ref_segment,
"candidate": sys_segment,
"raw_rating": raw_score,
"rating": z_score,
"segment_id": seg_id,
"system": sys_name,
"n_ratings": n_ratings
}
return json.dumps(json_dict)
def generate_records_for_lang(self, lang):
"""Consolidates all the files for a given language pair and year."""
# Loads source, reference and system segments.
src_segments, ref_segments = self.get_ref_segments(lang)
sys_segments = self.get_sys_segments(lang)
# Streams the rating file and performs the join on-the-fly.
ratings_file_path = self.get_ratings_path(lang)
logging.info("Reading file {}".format(ratings_file_path))
n_records = 0
with open(ratings_file_path, "r", encoding="utf-8") as f_ratings:
with open(self.target_file, "a+") as dest_file:
for line in itertools.islice(f_ratings, 1, None):
line = line.rstrip()
parsed_tuple = self.parse_rating(line)
sys_name, seg_id, raw_score, z_score, n_ratings = parsed_tuple
          # These system-name remapping rules come from the WMT 2019 DA2RR
          # script (seglevel-ken-rr.py, in the Metrics results package).
if sys_name == "UAlacant_-_NM":
sys_name = "UAlacant_-_NMT+RBMT.6722"
if sys_name == "HUMAN":
continue
if sys_name == "RBMT.6722":
continue
# The following rules were added by me to unblock WMT2019:
if sys_name == "Helsinki-NLP.6889":
sys_name = "Helsinki_NLP.6889"
if sys_name == "Facebook-FAIR.6937":
sys_name = "Facebook_FAIR.6937"
if sys_name == "Facebook-FAIR.6937":
sys_name = "Facebook_FAIR.6937"
if sys_name == "DBMS-KU-KKEN.6726":
sys_name = "DBMS-KU_KKEN.6726"
if sys_name == "Ju-Saarland.6525":
sys_name = "Ju_Saarland.6525"
if sys_name == "aylien-mt-gu-en-multilingual.6826":
sys_name = "aylien_mt_gu-en_multilingual.6826"
if sys_name == "rug-kken-morfessor.6677":
sys_name = "rug_kken_morfessor.6677"
if sys_name == "talp-upc-2019-kken.6657":
sys_name = "talp_upc_2019_kken.6657"
if sys_name == "Frank-s-MT.6127":
sys_name = "Frank_s_MT.6127"
if lang == "de-cs" and sys_name == "Unsupervised.6935":
sys_name = "Unsupervised.de-cs.6935"
if lang == "de-cs" and sys_name == "Unsupervised.6929":
sys_name = "Unsupervised.de-cs.6929"
# The "-1" is necessary because seg_id starts counting at 1.
src_segment = src_segments[seg_id - 1]
ref_segment = ref_segments[seg_id - 1]
sys_segment = sys_segments[sys_name][seg_id - 1]
if not src_segment or not sys_segment:
logging.info("* Missing value!")
logging.info("* System:" + sys_name)
logging.info("* Segment:" + str(seg_id))
logging.info("* Source segment:" + src_segment)
logging.info("* Sys segment:" + sys_segment)
logging.info("* Parsed line:" + line)
logging.info("* Lang:" + lang)
example = Importer18.to_json(self.year, lang, src_segment,
ref_segment, sys_segment, raw_score,
z_score, seg_id, sys_name, n_ratings)
dest_file.write(example)
dest_file.write("\n")
n_records += 1
logging.info("Done reading ratings file")
return n_records
class Importer19(Importer18):
"""Importer for WMT19 Metrics challenge."""
def parse_rating(self, rating_line):
rating_tuple = tuple(rating_line.split(" "))
    # The last field is presumably the number of ratings, but this is not
    # documented in the WMT release.
sys_name, seg_id, raw_score, z_score, n_ratings = rating_tuple
# For some reason, all the systems for pair zh-en have an extra suffix.
if sys_name.endswith("zh-en"):
sys_name = sys_name[:-6]
seg_id = int(seg_id)
raw_score = float(raw_score)
z_score = float(z_score)
n_ratings = int(n_ratings)
return sys_name, seg_id, raw_score, z_score, n_ratings
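  # Illustrative sketch (made-up line): the zh-en suffix is stripped, so
  # "sysA.1234.zh-en 7 85.0 0.42 2" parses to
  # ('sysA.1234', 7, 85.0, 0.42, 2).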
def parse_submission_file_name(self, fname):
"""Extracts system names from the name of submission files."""
    # These rules were added to unblock the pipeline.
if fname == "newstest2019.Unsupervised.de-cs.6929.de-cs":
return "Unsupervised.de-cs.6929"
elif fname == "newstest2019.Unsupervised.de-cs.6935.de-cs":
return "Unsupervised.de-cs.6935"
wmt_pattern = re.compile(
r"^newstest2019\.([a-zA-Z0-9\-\.\_\+]+\.[0-9]+)\.[a-z]{2}-[a-z]{2}")
match = re.match(wmt_pattern, fname)
if match:
return match.group(1)
else:
return None
def list_lang_pairs(self):
"""List all language pairs included in the WMT files for 2019."""
folder_name, _, _ = self.location_info["eval_data"]
folder = os.path.join(self.temp_directory, folder_name, "*", "analysis",
"ad-seg-scores-*.csv")
all_full_paths = glob.glob(folder)
all_files = [os.path.basename(f) for f in all_full_paths]
cand_lang_pairs = [self.parse_eval_file_name(fname) for fname in all_files]
    # Removes None values from cand_lang_pairs.
lang_pairs = [lang_pair for lang_pair in cand_lang_pairs if lang_pair]
return list(set(lang_pairs))
def get_ratings_path(self, lang):
folder, _, _ = self.location_info["eval_data"]
# The pair zh-en has two versions in the WMT 2019 human eval folder.
if lang == "zh-en":
path = os.path.join(self.temp_directory, folder,
"turkle-sntlevel-humaneval-newstest2019", "analysis",
"ad-seg-scores-zh-en.csv")
return path
file_name = "ad-seg-scores-{}.csv".format(lang)
folder = os.path.join(self.temp_directory, folder, "*", "analysis",
"ad-seg-scores-*.csv")
all_files = glob.glob(folder)
for cand_file in all_files:
if cand_file.endswith(file_name):
return cand_file
raise ValueError("Can't find ratings for lang {}".format(lang))
def get_ref_segments(self, lang):
"""Fetches source and reference translation segments for language pair."""
folder, _, _ = self.location_info["submissions"]
src_subfolder = os.path.join("txt", "sources")
ref_subfolder = os.path.join("txt", "references")
src_lang, tgt_lang = separate_lang_pair(lang)
src_file = "newstest2019-{src}{tgt}-src.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=src_lang)
ref_file = "newstest2019-{src}{tgt}-ref.{lang}".format(
src=src_lang, tgt=tgt_lang, lang=tgt_lang)
src_path = os.path.join(self.temp_directory, folder, src_subfolder,
src_file)
ref_path = os.path.join(self.temp_directory, folder, ref_subfolder,
ref_file)
logging.info("Reading data from files {} and {}".format(src_path, ref_path))
with open(src_path, "r", encoding="utf-8") as f_src:
src_segments = f_src.readlines()
with open(ref_path, "r", encoding="utf-8") as f_ref:
ref_segments = f_ref.readlines()
src_segments = [postprocess_segment(s) for s in src_segments]
ref_segments = [postprocess_segment(s) for s in ref_segments]
return src_segments, ref_segments
def get_sys_segments(self, lang):
"""Builds a dictionary with the generated segments for each system."""
# Gets all submission file paths.
folder_name, _, _ = self.location_info["submissions"]
subfolder = os.path.join("txt", "system-outputs", "newstest2019")
folder = os.path.join(self.temp_directory, folder_name, subfolder, lang)
all_files = os.listdir(folder)
logging.info("Reading submission files from {}".format(folder))
# Extracts the generated segments for each submission.
sys_segments = {}
for sys_file_name in all_files:
sys_name = self.parse_submission_file_name(sys_file_name)
assert sys_name
sys_path = os.path.join(folder, sys_file_name)
with open(sys_path, "r", encoding="utf-8") as f_sys:
sys_lines = f_sys.readlines()
sys_lines = [postprocess_segment(s) for s in sys_lines]
sys_segments[sys_name] = sys_lines
return sys_segments
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes correlation betweem BLEURT and human ratings on a test file from WMT."""
import collections
import json
from bleurt import score
import numpy as np
import pandas as pd
from scipy import stats
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
app = tf.app
FLAGS = flags.FLAGS
flags.DEFINE_string("candidate_checkpoint", None,
"Path to BLEURT bleurt_checkpoint to benchmark.")
flags.DEFINE_string(
"candidate_predictions_file", None,
"Path to WMT-style predictions file. See "
"http://www.statmt.org/wmt19/metrics-task.html for a description of the "
"format.")
flags.DEFINE_string("test_file", None,
"Path to JSONL ratings file to be used as test data.")
flags.DEFINE_string("results_json", None,
"JSON file where the results will be written.")
flags.DEFINE_integer(
"sample_size", None,
"Samples N items from the human ratings without replacement.")
flags.DEFINE_boolean("to_english", False, "To-English language pairs only.")
def kendall(pred, ref):
return stats.kendalltau(pred, ref)[0]
def pearson(pred, ref):
return stats.pearsonr(pred, ref)[0]
def spearman(pred, ref):
return stats.spearmanr(pred, ref)[0]
def grouped_wmt_kendall(df, year=2019, threshold=25):
"""Groups translations by source and computes WMT's Kendall variant."""
tf.logging.debug("Subset size: {}".format(len(df.index)))
n_sentences = df["reference"].nunique()
tf.logging.debug("Number of reference sentences: {}".format(n_sentences))
df = df.dropna(subset=["bleurt"])
groups = df.groupby(["reference"])
agreement, n_pairs, n_skipped = 0, 0, 0
n_kept_groups, n_skipped_groups = 0, 0
for _, group_df in groups:
if len(group_df.index) == 1:
n_skipped_groups += 1
continue
n_kept_groups += 1
local_agreement, local_n_pairs, local_n_skipped = wmt_kendall(
group_df, year, threshold, return_counts=True)
    if (local_agreement is None or local_n_pairs is None or
        local_n_skipped is None):
      continue
agreement += local_agreement
n_pairs += local_n_pairs
n_skipped += local_n_skipped
if n_pairs == 0:
tf.logging.info(
"Zero pairs found, skipping. If this behavior is unexpected, "
"please ensure that raw ratings exist exist.")
return None
tf.logging.debug("Found {} agreements among {} pairs".format(
agreement, n_pairs))
tf.logging.debug("{} pairs skipped".format(n_skipped))
tf.logging.debug("{} groups n_kept_groups".format(n_kept_groups))
tf.logging.debug("{} groups skipped".format(n_skipped_groups))
return agreement * 1.0 / n_pairs
def wmt_kendall(df, year=2018, threshold=25, return_counts=False):
"""Implement the variant of Kendall Tau used in the WMT metrics shared task."""
raw_ratings = df["raw_rating"].to_numpy()
predictions = df["bleurt"].to_numpy()
finite_ratings = np.isfinite(raw_ratings)
raw_ratings = raw_ratings[finite_ratings]
predictions = predictions[finite_ratings]
if not raw_ratings.size:
tf.logging.warn("Cannot compute WMT Kendall variant on null raw ratings.")
if return_counts:
return None, None, None
else:
return None
if year < 2018:
ties_matrix = WMT17_TIES_MATRIX
else:
ties_matrix = WMT18_TIES_MATRIX
agreement = 0
n_pairs = 0
n_skipped = 0
n_total = 0
n_items = len(raw_ratings)
assert len(predictions) == n_items
for i in range(n_items - 1):
for j in range(i + 1, n_items):
n_total += 1
if (raw_ratings[i] == raw_ratings[j] or threshold is not None and
abs(raw_ratings[i] - raw_ratings[j]) < threshold):
n_skipped += 1
continue
if raw_ratings[i] < raw_ratings[j]:
human_rank = "<"
elif raw_ratings[i] == raw_ratings[j]:
human_rank = "="
elif raw_ratings[i] > raw_ratings[j]:
human_rank = ">"
else:
raise ValueError("Wrong raw_ratings values: {}, {}".format(
raw_ratings[i], raw_ratings[j]))
if predictions[i] < predictions[j]:
pred_rank = "<"
elif predictions[i] == predictions[j]:
pred_rank = "="
elif predictions[i] > predictions[j]:
pred_rank = ">"
else:
raise ValueError("Wrong prediction values: {}, {}".format(
predictions[i], predictions[j]))
increment = ties_matrix[(human_rank, pred_rank)]
assert increment is not None
agreement += increment
n_pairs += 1
assert n_pairs + n_skipped == n_total
if return_counts:
return agreement, n_pairs, n_skipped
tf.logging.debug("Found {} agreements among {} pairs".format(
agreement, n_pairs))
return agreement * 1.0 / n_pairs
# The WMT Metrics shared task used different weighting schemes in 2017 and 2018.
WMT17_TIES_MATRIX = {
("<", "<"): 1,
("<", "="): 0,
("<", ">"): -1,
("=", "<"): None,
("=", "="): None,
("=", ">"): None,
(">", "<"): -1,
(">", "="): 0,
(">", ">"): 1
}
WMT18_TIES_MATRIX = {
("<", "<"): 1,
("<", "="): -1,
("<", ">"): -1,
("=", "<"): None,
("=", "="): None,
("=", ">"): None,
(">", "<"): -1,
(">", "="): -1,
(">", ">"): 1
}
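# Illustrative worked example of wmt_kendall with made-up numbers: with raw
# ratings [10, 50, 90] and threshold=25, all three pairs differ by at least
# 25 points, so none are skipped. If the BLEURT scores rank the segments in
# the same order, every pair contributes +1 under either ties matrix:
#
#   >>> df = pd.DataFrame({"raw_rating": [10.0, 50.0, 90.0],
#   ...                    "bleurt": [0.1, 0.5, 0.9]})
#   >>> wmt_kendall(df, year=2018, threshold=25)
#   1.0
#
# Reversing one prediction pair would turn its +1 into -1 and yield 1/3.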
METRICS = {
"kendall": kendall,
"pearson": pearson,
"spearman": spearman,
"wmt_da_rr_kendall": grouped_wmt_kendall
}
THRESHOLDS = {2015: 0, 2016: 0, 2017: 0, 2018: 25, 2019: 25, 2020: 25}
# In WMT20, additional systems were removed based on human judgments.
# This list comes from Appendix A of the paper
# http://www.statmt.org/wmt20/pdf/2020.wmt-1.77.pdf
# Thanks to Sweeta Agrawal and George Foster for collecting.
WMT_OUTLIERS = {
2020: {
"cs-en": ["zlabs-nlp.1149", "CUNI-DocTransformer.1457"],
"de-en": ["yolo.1052", "zlabs-nlp.1153", "WMTBiomedBaseline.387"],
"iu-en": ["NiuTrans.1206", "Facebook_AI.729"],
"ja-en": ["Online-G.1564", "zlabs-nlp.66", "Online-Z.1640"],
"pl-en": ["zlabs-nlp.1162"],
"ru-en": ["zlabs-nlp.1164"],
"ta-en": ["Online-G.1568", "TALP_UPC.192"],
"zh-en": ["WMTBiomedBaseline.183"],
"en-cs": ["zlabs-nlp.1151", "Online-G.1555"],
"en-de": ["zlabs-nlp.179", "WMTBiomedBaseline.388", "Online-G.1556"],
"en-iu": ["UEDIN.1281", "OPPO.722", "UQAM_TanLe.521"],
"en-pl": ["Online-Z.1634", "zlabs-nlp.180", "Online-A.1576"],
"en-ta": ["TALP_UPC.1049", "SJTU-NICT.386", "Online-G.1561"],
# The list for en-ja was omitted from the appendix; these systems were
# guessed through trial-and-error to match the scores in table 6.
"en-ja": ["Online-G.1557", "SJTU-NICT.370"],
}
}
def eval_checkpoint(export_dir, test_file, results_json=None):
"""Runs evaluation on a BLEURT checkpoint."""
def _scoring_fun(test_df):
scorer = score.BleurtScorer(export_dir)
return scorer.score(
references=test_df.reference.tolist(),
candidates=test_df.candidate.tolist())
return run_eval(_scoring_fun, test_file, results_json)
def eval_tf_export(export_dir, test_file, results_json=None):
"""Runs evaluation on a SavedModel with in-graph tokenization."""
bleurt_scorer = score.SavedModelBleurtScorer(export_dir)
def _scoring_fun(test_df):
scores = bleurt_scorer.score(
references=test_df.reference, candidates=test_df.candidate)
return scores
return run_eval(_scoring_fun, test_file, results_json)
def eval_prediction_file(prediction_file,
test_file,
results_json=None,
wmt_format=True,
exclude_sys=None):
"""Runs evaluation on a prediction file, possibly in WMT format."""
tf.logging.info("Evaluating file {}".format(prediction_file))
assert tf.io.gfile.exists(prediction_file), "Could not find file."
def _predict_from_file(test_data_df, filter_newstest=True):
tf.logging.info("Reading input file.")
if wmt_format:
      # Reads a file in the WMT 15-19 submissions format.
col_names = [
"metric", "lang", "corpus", "system", "segment_id", "prediction",
"ens", "url"
]
predictions_df = pd.read_csv(
tf.gfile.Open(prediction_file), sep="\t", names=col_names)
if filter_newstest:
predictions_df = predictions_df.loc[
predictions_df["corpus"].str.startswith("newstest"), :]
else:
# Reads a file with free TSV format. The expectation is that the file
# contains column names, including "system", "segment_id", and "lang".
predictions_df = pd.read_csv(tf.gfile.Open(prediction_file), sep="\t")
for col in ["lang", "system", "segment_id"]:
assert col in predictions_df.columns
tf.logging.info("Done reading input file.")
tf.logging.info(predictions_df.head())
# Joins with ratings data.
join_df = test_data_df.merge(
predictions_df,
how="left",
on=["system", "segment_id", "lang"],
sort=False)
assert join_df["score"].count() == test_data_df["score"].count()
assert np.all((
join_df["score"].to_numpy() == test_data_df["score"].to_numpy())
| np.isnan(join_df["score"].to_numpy()))
predictions = join_df["prediction"]
n_nulls = predictions.isna().sum()
if n_nulls > 0:
tf.logging.warning("Found {} nulls in baseline".format(n_nulls))
return predictions.tolist()
return run_eval(_predict_from_file, test_file, results_json, exclude_sys)
def run_eval(scoring_fun, test_file, results_json=None, exclude_sys=None):
"""Computes correlations between BLEURT and human ratings on all pairs of languages."""
assert tf.io.gfile.exists(test_file), "Could not find test file."
logging.info("Reading test set.")
with tf.io.gfile.GFile(test_file, "r") as f:
test_df = pd.read_json(f, lines=True)
# test_df = test_df[np.isfinite(test_df["score"])]
if FLAGS.sample_size:
logging.info("Sampling {} items.".format(str(FLAGS.sample_size)))
test_df = test_df.sample(n=FLAGS.sample_size, random_state=55555)
if FLAGS.to_english:
logging.info("Filtering out non-English.")
test_df = test_df[test_df["lang"].str.endswith("en")]
n_items = len(test_df)
for col in ["year", "lang", "reference", "candidate", "score"]:
assert col in test_df.columns, \
"Field {} not found".format(col)
# Weighting schemes differ across years. Permit only one year at a time.
assert len(test_df["year"].unique()) == 1
logging.info("Read {} examples.".format(n_items))
logging.info("Obtaining predictions.")
bleurt_scores = scoring_fun(test_df)
assert len(bleurt_scores) == n_items
logging.info("Done.")
test_df["bleurt"] = bleurt_scores
if exclude_sys:
tf.logging.info("Excluding systems matching: {}.".format(exclude_sys))
tf.logging.info("Num rows before: {}.".format(len(test_df.index)))
test_df = test_df[~test_df["system"].str.match(exclude_sys)]
tf.logging.info("Num rows after: {}.".format(len(test_df.index)))
logging.info("Computing correlations.")
year = test_df["year"].unique()[0]
grouped_by_lang = test_df.groupby(by=["lang"])
results = collections.defaultdict(dict)
for group_name, group_df in grouped_by_lang:
logging.info("* {}:".format(group_name))
systems = group_df["system"].unique()
tf.logging.info("sytems: {}".format(" ".join(systems)))
# Segment-level correlations.
predictions = group_df["bleurt"].to_numpy()
reference = group_df["score"].to_numpy()
finite_refs = np.isfinite(reference)
predictions = predictions[finite_refs]
reference = reference[finite_refs]
for metric_name in METRICS:
if metric_name == "wmt_da_rr_kendall":
metric_value = METRICS[metric_name](group_df, year, THRESHOLDS[year])
else:
metric_value = METRICS[metric_name](predictions, reference)
logging.info("** {}: {}".format(metric_name, metric_value))
results[group_name][metric_name] = metric_value
# System-level correlation.
grouped_by_system = group_df.groupby("system").agg({
"bleurt": "mean",
"score": "mean"
}).reset_index()
grouped_by_system = grouped_by_system[np.isfinite(
grouped_by_system["score"])]
predictions = grouped_by_system["bleurt"].to_numpy()
reference = grouped_by_system["score"].to_numpy()
for metric_name in ["kendall", "pearson", "spearman"]:
metric_value = METRICS[metric_name](predictions, reference)
logging.info("** sys-{}: {}".format(metric_name, metric_value))
results[group_name]["sys-" + metric_name] = metric_value
# System-level, excluding outliers.
if year not in WMT_OUTLIERS:
continue
if group_name in WMT_OUTLIERS[year]:
outliers = WMT_OUTLIERS[year][group_name]
else:
outliers = []
grouped_by_system_nooutl = grouped_by_system[~grouped_by_system["system"]
.isin(outliers)]
predictions = grouped_by_system_nooutl["bleurt"].to_numpy()
reference = grouped_by_system_nooutl["score"].to_numpy()
for metric_name in ["kendall", "pearson", "spearman"]:
metric_value = METRICS[metric_name](predictions, reference)
logging.info("** sys-{}-nooutl: {}".format(metric_name, metric_value))
results[group_name]["sys-nooutl-" + metric_name] = metric_value
# System-level, top 10.
grouped_by_system_topk = grouped_by_system.nlargest(10, "score")
predictions = grouped_by_system_topk["bleurt"].to_numpy()
reference = grouped_by_system_topk["score"].to_numpy()
for metric_name in ["kendall", "pearson", "spearman"]:
metric_value = METRICS[metric_name](predictions, reference)
logging.info("** sys-{}-top10: {}".format(metric_name, metric_value))
results[group_name]["sys-top10-" + metric_name] = metric_value
if results_json:
logging.info("Writing the results to disk")
with tf.io.gfile.GFile(results_json, mode="w+") as out_file:
out_json = json.dumps(results)
out_file.write(out_json)
logging.info("Done.")
return results
def main(_):
if FLAGS.candidate_checkpoint:
eval_checkpoint(FLAGS.candidate_checkpoint, FLAGS.test_file,
FLAGS.results_json)
if FLAGS.candidate_predictions_file:
eval_prediction_file(FLAGS.candidate_predictions_file, FLAGS.test_file,
FLAGS.results_json)
if __name__ == "__main__":
tf.app.run(main)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment utilities from the Google Language Team.
Source: https://github.com/google-research/language/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
flags.DEFINE_integer("batch_size", 16, "Batch size.")
flags.DEFINE_integer("eval_batch_size", 16, "Evaluation batch size.")
flags.DEFINE_string("model_dir", None, "Model directory")
flags.DEFINE_integer("tf_random_seed", None, "Random seed for tensorflow")
flags.DEFINE_integer("num_eval_steps", None,
"Number of steps to take during evaluation.")
flags.DEFINE_integer("num_train_steps", 25000,
"Number of steps to take during training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"Number of steps between checkpoint saves.")
flags.DEFINE_integer("eval_throttle_secs", 0,
"Minimum number of seconds to wait between evaluations")
flags.DEFINE_integer("eval_start_delay_secs", 0,
"Number of seconds to wait before starting evaluations.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"Max number of checkpoints to keep")
FLAGS = flags.FLAGS
def run_experiment(model_fn,
train_input_fn,
eval_input_fn,
additional_eval_specs=None,
exporters=None):
"""Run experiment."""
run_config = tf_estimator.RunConfig(
model_dir=FLAGS.model_dir,
tf_random_seed=FLAGS.tf_random_seed,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max)
estimator = tf_estimator.Estimator(
config=run_config, model_fn=model_fn, model_dir=FLAGS.model_dir)
train_spec = tf_estimator.TrainSpec(
input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
assert not additional_eval_specs, (
"Multiple eval sets are not supported with default experiment runner.")
eval_spec = tf_estimator.EvalSpec(
name="default",
input_fn=eval_input_fn,
exporters=exporters,
start_delay_secs=FLAGS.eval_start_delay_secs,
throttle_secs=FLAGS.eval_throttle_secs,
steps=FLAGS.num_eval_steps)
tf.logging.set_verbosity(tf.logging.INFO)
tf_estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
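# Illustrative usage sketch (my_model_fn, my_train_input_fn and
# my_eval_input_fn are assumed to be defined elsewhere, e.g. by the BLEURT
# trainer; the model directory is hypothetical):
#
#   >>> FLAGS.model_dir = "/tmp/bleurt_model"
#   >>> run_experiment(my_model_fn, my_train_input_fn, my_eval_input_fn)
#
# This trains for FLAGS.num_train_steps steps, checkpointing and evaluating
# roughly every FLAGS.save_checkpoints_steps steps.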
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes.
Branched from https://github.com/google-research/bert.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow.compat.v1 as tf
logging = tf.logging
# This used to be a flag in the original BERT version.
PRESERVE_UNUSED_TOKENS = False
_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")
def preserve_token(token, vocab):
"""Returns True if the token should forgo tokenization and be preserved."""
if not PRESERVE_UNUSED_TOKENS:
return False
if token not in vocab:
return False
return bool(_UNUSED_TOKEN_RE.search(token))
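# Example: with PRESERVE_UNUSED_TOKENS flipped to True and "[unused0]"
# present in the vocab, preserve_token("[unused0]", vocab) returns True;
# with the default (False), every token returns False and is tokenized
# normally.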
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
if token not in vocab:
vocab[token] = len(vocab)
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, vocab=self.vocab)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, vocab=tuple()):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
vocab: A container of tokens to not mutate during tokenization.
"""
self.do_lower_case = do_lower_case
self.vocab = vocab
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
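  # Illustrative sketch (made-up input): with do_lower_case=True,
  #
  #   >>> BasicTokenizer().tokenize(u"Hello, WORLD!")
  #   ['hello', ',', 'world', '!']
  #
  # i.e. punctuation is split off and the text is lowercased.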
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates).
Branched from https://github.com/google-research/bert.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = ((1.0 - is_warmup) * learning_rate +
is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
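# A minimal usage sketch (hypothetical, not part of the original module). It
# builds a TF1 graph around a toy quadratic loss and runs a few steps of the
# warmup-then-linear-decay schedule defined above; the name
# `_demo_create_optimizer` is illustrative only.
def _demo_create_optimizer():
  with tf.Graph().as_default(), tf.Session() as sess:
    w = tf.get_variable("w", initializer=5.0)
    loss = tf.square(w)  # Minimized at w == 0.
    train_op = create_optimizer(
        loss, init_lr=0.1, num_train_steps=100, num_warmup_steps=10,
        use_tpu=False)
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
      sess.run(train_op)
    return sess.run(w)  # Should have moved towards 0.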
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) +
tf.multiply(1.0 - self.beta_2, tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
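# Quick illustrative check (hypothetical, not part of the original module) of
# the decoupled weight-decay exclusion rules above: LayerNorm and bias
# variables are skipped, dense kernels are decayed.
def _demo_weight_decay_exclusion():
  opt = AdamWeightDecayOptimizer(
      learning_rate=1e-3, weight_decay_rate=0.01,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  # pylint: disable=protected-access
  assert opt._do_use_weight_decay("bert/encoder/layer_0/output/dense/kernel")
  assert not opt._do_use_weight_decay("bert/encoder/layer_0/output/LayerNorm/beta")
  assert not opt._do_use_weight_decay("cls/predictions/output_bias")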
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions.
Branched from https://github.com/google-research/bert.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
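# A small illustrative round-trip (hypothetical, not part of the original
# module): configs can be built from a plain dict and serialized back to JSON.
def _demo_bert_config_roundtrip():
  config = BertConfig.from_dict({"vocab_size": 32000, "hidden_size": 512})
  assert config.vocab_size == 32000 and config.hidden_size == 512
  return config.to_json_string()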
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.word_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.word_embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained.
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_word_embedding_output(self):
"""Get output of the word(piece) embedding lookup.
This is BEFORE positional embeddings and token type embeddings have been
added.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the word(piece) embedding layer.
"""
return self.word_embedding_output
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
  This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
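# Numeric sanity sketch (hypothetical): the tanh approximation above closely
# tracks the exact GELU, x * Phi(x); e.g. GELU(1.0) is approximately 0.8412.
def _demo_gelu_values():
  with tf.Graph().as_default(), tf.Session() as sess:
    y = sess.run(gelu(tf.constant([-1.0, 0.0, 1.0])))
    np.testing.assert_allclose(y, [-0.1588, 0.0, 0.8412], atol=1e-3)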
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, rate=dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf_slim.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
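# Shape sketch (hypothetical): a [batch, seq_length] id tensor comes back as a
# [batch, seq_length, embedding_size] float tensor plus the embedding table.
def _demo_embedding_lookup():
  with tf.Graph().as_default(), tf.Session() as sess:
    input_ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # [batch=2, seq=3]
    output, table = embedding_lookup(
        input_ids, vocab_size=10, embedding_size=4)
    sess.run(tf.global_variables_initializer())
    assert sess.run(output).shape == (2, 3, 4)
    assert table.shape.as_list() == [10, 4]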
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens) so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
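# Broadcast sketch (hypothetical): with one padded position, every from-token
# may attend to the two real tokens but not to the pad.
def _demo_attention_mask():
  with tf.Graph().as_default(), tf.Session() as sess:
    input_ids = tf.constant([[1, 2, 0]])
    input_mask = tf.constant([[1, 1, 0]])
    mask = sess.run(
        create_attention_mask_from_input_mask(input_ids, input_mask))
    np.testing.assert_allclose(mask, [[[1., 1., 0.]] * 3])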
def dense_layer_3d(input_tensor,
num_attention_heads,
size_per_head,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel.
Args:
input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
num_attention_heads: Number of attention heads.
size_per_head: The size per attention head.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
last_dim = get_shape_list(input_tensor)[-1]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[last_dim, num_attention_heads * size_per_head],
initializer=initializer)
w = tf.reshape(w, [last_dim, num_attention_heads, size_per_head])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * size_per_head],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, size_per_head])
ret = tf.einsum("abc,cde->abde", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_3d_proj(input_tensor,
hidden_size,
num_attention_heads,
head_size,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel for projection.
Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of the output hidden layer.
    num_attention_heads: Number of attention heads.
    head_size: The size of each attention head.
    initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, hidden_size],
initializer=initializer)
w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
b = tf.get_variable(
name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
ret = tf.einsum("BFNH,NHD->BFD", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_2d(input_tensor,
output_size,
initializer,
activation,
name=None):
"""A dense layer with 2D kernel.
Args:
input_tensor: Float tensor with rank 3.
output_size: The size of output dimension.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
last_dim = get_shape_list(input_tensor)[-1]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel", shape=[last_dim, output_size], initializer=initializer)
b = tf.get_variable(
name="bias", shape=[output_size], initializer=tf.zeros_initializer)
ret = tf.einsum("abc,cd->abd", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
  This is an implementation of multi-headed attention based on "Attention
  Is All You Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with tf.einsum as follows:
Input_tensor: [BFD]
Wq, Wk, Wv: [DNH]
Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)
K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)
V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)
    attention_scores:[BNFT] = einsum('BFNH,BTNH->BNFT', Q, K) / sqrt(H)
attention_probs:[BNFT] = softmax(attention_scores)
context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)
Wout:[DNH]
    Output:[BFD] = einsum('BFNH,DNH->BFD', context_layer, Wout)
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_layer` = [B, F, N, H]
query_layer = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), query_act,
"query")
# `key_layer` = [B, T, N, H]
key_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), key_act,
"key")
# `value_layer` = [B, T, N, H]
value_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), value_act,
"value")
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_layer, query_layer)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `context_layer` = [B, F, N, H]
context_layer = tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_layer)
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
input_width = input_shape[2]
  # The Transformer uses residual (sum) connections on all layers, so the
  # input width needs to match the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
prev_output = input_tensor
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
with tf.variable_scope("self"):
attention_output = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = dense_layer_3d_proj(
attention_output, hidden_size,
num_attention_heads, attention_head_size,
create_initializer(initializer_range), None, "dense")
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = dense_layer_2d(
attention_output, intermediate_size,
create_initializer(initializer_range), intermediate_act_fn, "dense")
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = dense_layer_2d(intermediate_output, hidden_size,
create_initializer(initializer_range),
None, "dense")
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
return all_layer_outputs
else:
return all_layer_outputs[-1]
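# Shape sketch (hypothetical): a tiny encoder preserves the
# [batch, seq_length, hidden_size] shape of its input.
def _demo_transformer_model():
  with tf.Graph().as_default():
    x = tf.zeros([2, 5, 16])
    out = transformer_model(
        x, hidden_size=16, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=32)
    assert out.shape.as_list() == [2, 5, 16]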
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
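# Illustrative check (hypothetical): static dimensions come back as Python
# ints, unknown ones as scalar Tensors.
def _demo_get_shape_list():
  with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[None, 7])
    shape = get_shape_list(x, expected_rank=2)
    assert shape[1] == 7
    assert isinstance(shape[0], tf.Tensor)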
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper classes for various types of tokenization."""
from bleurt.lib import bert_tokenization
import tensorflow.compat.v1 as tf
import sentencepiece as spm
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
class Tokenizer(object):
"""Base class for WordPiece and TokenPiece tokenizers."""
  def tokenize(self, text):
    raise NotImplementedError()
  def convert_tokens_to_ids(self, tokens):
    raise NotImplementedError()
class WordPieceTokenizer(Tokenizer):
"""Wrapper around BERT's FullTokenizer."""
def __init__(self, vocab_file, do_lower_case):
logging.info("Creating WordPiece tokenizer.")
self.vocab_file = vocab_file
self.do_lower_case = do_lower_case
self._tokenizer = bert_tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
logging.info("WordPiece tokenizer instantiated.")
def tokenize(self, text):
return self._tokenizer.tokenize(text)
def convert_tokens_to_ids(self, tokens):
return self._tokenizer.convert_tokens_to_ids(tokens)
class SentencePieceTokenizer(Tokenizer):
"""Wrapper around SentencePiece tokenizer."""
def __init__(self, sp_model):
logging.info("Creating SentencePiece tokenizer.")
self._sp_model_path = sp_model + ".model"
logging.info("Will load model: {}.".format(self._sp_model_path))
self._sp_model = spm.SentencePieceProcessor()
self._sp_model.Load(self._sp_model_path)
self.vocab_size = self._sp_model.GetPieceSize()
logging.info("SentencePiece tokenizer created.")
def tokenize(self, text):
return self._sp_model.EncodeAsPieces(text)
def convert_tokens_to_ids(self, tokens):
return [self._sp_model.PieceToId(token) for token in tokens]
def create_tokenizer(vocab_file=None, do_lower_case=None, sp_model=None):
"""Factory function for tokenizers."""
if vocab_file and do_lower_case is not None:
return WordPieceTokenizer(vocab_file, do_lower_case)
elif sp_model:
logging.info("Creating SentencePiece tokenizer.")
return SentencePieceTokenizer(sp_model)
else:
raise ValueError("Cannot determine the type of Tokenizer to build from "
"arguments.")
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
from functools import partial
from absl.testing import parameterized
from big_vision import utils
import chex
import flax
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
NDEV = 4
def setUpModule():
chex.set_n_cpu_devices(NDEV)
class PadShardUnpadTest(chex.TestCase, tf.test.TestCase):
BATCH_SIZES = [NDEV, NDEV + 1, NDEV - 1, 5 * NDEV, 5 * NDEV + 1, 5 * NDEV - 1]
DTYPES = [np.float32, np.uint8, jax.numpy.bfloat16, np.int32]
def tearDown(self):
chex.clear_trace_counter()
super().tearDown()
@parameterized.product(dtype=DTYPES, bs=BATCH_SIZES)
def test_basics(self, dtype, bs):
# Just tests that basic calling works without exploring caveats.
@partial(utils.pad_shard_unpad, static_argnums=())
def add(a, b):
return a + b
x = np.arange(bs, dtype=dtype)
y = add(x, 10*x)
chex.assert_type(y.dtype, x.dtype)
np.testing.assert_allclose(np.float64(y), np.float64(x + 10*x))
@parameterized.parameters(DTYPES)
def test_min_device_batch_avoids_recompile(self, dtype):
@partial(utils.pad_shard_unpad, static_argnums=())
@jax.jit
@chex.assert_max_traces(n=1)
def add(a, b):
return a + b
chex.clear_trace_counter()
for bs in self.BATCH_SIZES:
x = np.arange(bs, dtype=dtype)
y = add(x, 10*x, min_device_batch=9) # pylint: disable=unexpected-keyword-arg
chex.assert_type(y.dtype, x.dtype)
np.testing.assert_allclose(np.float64(y), np.float64(x + 10*x))
@parameterized.product(dtype=DTYPES, bs=BATCH_SIZES)
def test_static_argnum(self, dtype, bs):
@partial(utils.pad_shard_unpad, static_argnums=(1,))
def add(a, b):
return a + b
x = np.arange(bs, dtype=dtype)
y = add(x, 10)
chex.assert_type(y.dtype, x.dtype)
np.testing.assert_allclose(np.float64(y), np.float64(x + 10))
@parameterized.product(dtype=DTYPES, bs=BATCH_SIZES)
def test_static_argnames(self, dtype, bs):
# In this test, leave static_argnums at the default value too, in order to
# test the default/most canonical path where `params` are the first arg.
@partial(utils.pad_shard_unpad, static_argnames=('b',))
def add(params, a, *, b):
return params * a + b
x = np.arange(bs, dtype=dtype)
y = add(5, x, b=10)
chex.assert_type(y.dtype, x.dtype)
np.testing.assert_allclose(np.float64(y), np.float64(5 * x + 10))
class TreeTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.d1 = {'w1': 1, 'w2': 2, 'w34': (3, 4)}
self.d1_flat = [1, 2]
self.d1_flat_jax = jax.tree_flatten(self.d1)[0]
self.d1_named_flat = [('w1', 1), ('w2', 2), ('w34/0', 3), ('w34/1', 4)]
self.d1_named_flat_jax = [('w1', 1), ('w2', 2), ('w34/0', 3), ('w34/1', 4)]
self.d2 = {'conv1': {'kernel': 0, 'bias': 1},
'conv2': {'kernel': 2, 'bias': 3}}
self.d2_flat = [1, 0, 3, 2]
self.d2_flat_jax = jax.tree_flatten(self.d2)[0]
self.d2_named_flat = [('conv1/bias', 1), ('conv1/kernel', 0),
('conv2/bias', 3), ('conv2/kernel', 2)]
self.d2_named_flat_jax = [('conv1/bias', 1), ('conv1/kernel', 0),
('conv2/bias', 3), ('conv2/kernel', 2)]
self.d2_named_flat_inner = [
('conv1/bias', 1), ('conv1/kernel', 0), ('conv1', self.d2['conv1']),
('conv2/bias', 3), ('conv2/kernel', 2), ('conv2', self.d2['conv2']),
('', self.d2),
]
# This is a very important testcase that checks whether we correctly
# recover jax' traversal order, even though our custom traversal may not
# be consistent with jax' traversal order. In particular, jax traverses
# FlaxStruct in the order of attribute definition, while our custom
# traversal is alphabetical.
@flax.struct.dataclass
class FlaxStruct():
v3: float
v2: int
v1: str
self.d3 = {'a': 0, 'flax': FlaxStruct(2.0, 1, 's')}
self.d3_flat = [0, 1, 2.0, 's']
self.d3_flat_jax = jax.tree_flatten(self.d3)[0]
self.d3_named_flat = [
('a', 0), ('flax/v1', 's'), ('flax/v2', 1), ('flax/v3', 2.0)]
self.d3_named_flat_jax = [
('a', 0), ('flax/v3', 2.0), ('flax/v2', 1), ('flax/v1', 's')]
def test_traverse_with_names(self):
names_and_vals = list(utils._traverse_with_names(self.d1))
self.assertEqual(names_and_vals, self.d1_named_flat)
names_and_vals = list(utils._traverse_with_names(self.d2))
self.assertEqual(names_and_vals, self.d2_named_flat)
names_and_vals = list(utils._traverse_with_names(
self.d2, with_inner_nodes=True))
self.assertEqual(names_and_vals, self.d2_named_flat_inner)
names_and_vals = list(utils._traverse_with_names(self.d3))
self.assertEqual(names_and_vals, self.d3_named_flat)
def test_tree_flatten_with_names(self):
names_and_vals = utils.tree_flatten_with_names(self.d1)[0]
self.assertEqual(names_and_vals, self.d1_named_flat_jax)
self.assertEqual([x for _, x in names_and_vals], self.d1_flat_jax)
names_and_vals = utils.tree_flatten_with_names(self.d2)[0]
self.assertEqual(names_and_vals, self.d2_named_flat_jax)
self.assertEqual([x for _, x in names_and_vals], self.d2_flat_jax)
names_and_vals = utils.tree_flatten_with_names(self.d3)[0]
self.assertEqual(names_and_vals, self.d3_named_flat_jax)
self.assertEqual([x for _, x in names_and_vals], self.d3_flat_jax)
def test_tree_map_with_names(self):
d1 = utils.tree_map_with_names(
lambda name, x: -x if 'w2' in name else x, self.d1)
self.assertEqual(d1, {'w1': 1, 'w2': -2, 'w34': (3, 4)})
d1 = utils.tree_map_with_names(
lambda name, x1, x2: x1 + x2 if 'w2' in name else x1, self.d1, self.d1)
self.assertEqual(d1, {'w1': 1, 'w2': 4, 'w34': (3, 4)})
def test_recover_tree(self):
keys = ['a/b', 'a/c/x', 'a/c/y', 'd']
values = [0, 1, 2, 3]
self.assertEqual(utils.recover_tree(keys, values),
{'a': {'b': 0, 'c': {'x': 1, 'y': 2}}, 'd': 3})
def test_make_mask_trees(self):
F, T = False, True # pylint: disable=invalid-name
tree = {'a': {'b': 0, 'x': 1}, 'b': {'x': 2, 'y': 3}}
msk1 = {'a': {'b': F, 'x': T}, 'b': {'x': T, 'y': F}}
msk2 = {'a': {'b': F, 'x': F}, 'b': {'x': F, 'y': T}}
# Note that 'b' matches '^b' only and not '.*/b'.
# Also note that "b/x" is matched by rule 1 only (because it comes first).
self.assertEqual(
utils.make_mask_trees(tree, ('.*/x', 'b/.*')), [msk1, msk2])
def test_tree_get(self):
tree = {'a': {'b': 0, 'x': 1}, 'b': {'x': 2, 'y': 3}}
self.assertEqual(utils.tree_get(tree, 'a/b'), 0)
self.assertEqual(utils.tree_get(tree, 'a/x'), 1)
self.assertEqual(utils.tree_get(tree, 'b/x'), 2)
self.assertEqual(utils.tree_get(tree, 'b/y'), 3)
self.assertEqual(utils.tree_get(tree, 'a'), tree['a'])
self.assertEqual(utils.tree_get(tree, 'b'), tree['b'])
def test_tree_replace(self):
tree = {'a': {'b': 2, 'c': 3}, 'c': 4}
replacements = {
'a/b': 'a/b/x', # replaces 'a/b' with 'a/b/x'
'.*c': 'C', # replaces 'c' with 'C' ('a/c' is removed)
'C': 'D', # replaces 'C' (which was 'c') with 'D'
'.*/c': None, # removes 'a/c'
}
tree2 = utils.tree_replace(tree, replacements)
self.assertEqual(tree2, {'D': 4, 'a': {'b': {'x': 2}}})
def test_tree_compare(self):
tree1_only, tree2_only, dtype_shape_mismatch = utils.tree_compare(
{'a': {'b': jnp.array(2), 'c': jnp.array(3)}},
{'a': {'B': jnp.array(2), 'c': jnp.array(3.)}},
)
self.assertEqual(tree1_only, {'a/b'})
self.assertEqual(tree2_only, {'a/B'})
self.assertEqual(
dtype_shape_mismatch,
{'a/c': [(jnp.dtype('int32'), ()), (jnp.dtype('float32'), ())]})
class StepConversionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('nice_steps', 1000, None, None, dict(foo_steps=3), 3),
('nice_epochs', 1000, 100, None, dict(foo_epochs=3), 30),
('nice_examples', None, 100, None, dict(foo_examples=300), 3),
('nice_percent', None, None, 10, dict(foo_percent=0.30), 3),
('offbyone_steps', 1001, None, None, dict(foo_steps=3), 3),
('offbyone_epochs', 1001, 100, None, dict(foo_epochs=3), 30),
('offbyone_examples', None, 101, None, dict(foo_examples=300), 3),
('offbyone_percent', None, None, 11, dict(foo_percent=0.30), 3),
)
def test_steps(self, data_size, batch_size, total, cfg, expected):
# Correct default usage:
step = utils.steps('foo', cfg, data_size=data_size, batch_size=batch_size,
total_steps=total)
self.assertEqual(step, expected)
    # Nonexistent entry:
with self.assertRaises(ValueError):
step = utils.steps('bar', cfg, data_size=data_size, batch_size=batch_size,
total_steps=total)
step = utils.steps('bar', cfg, data_size=data_size, batch_size=batch_size,
total_steps=total, default=1234)
self.assertEqual(step, 1234)
class CreateLearningRateScheduleTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('linear', 'linear', {}, 13, .5),
('polynomial', 'polynomial', {'end': .1, 'power': 2}, 13, .325),
('cosine', 'cosine', {}, 13, .5),
('rsqrt', 'rsqrt', {'timescale': 1}, 13, 0.3333333),
('stair_5', 'stair', {'steps': [10], 'mults': [.5]}, 5, 1.),
('stair_10', 'stair', {'steps': [10], 'mults': [.5]}, 10, .5),
('warmup_before', 'rsqrt', {'timescale': 1}, 3, .6),
('cooldown_after', 'rsqrt', {'timescale': 1}, 20, .05),
)
def test_schedule(self, decay_type, extra_kwargs, step, expected_lr):
lr_fn = utils.create_learning_rate_schedule(
total_steps=21,
batch_size=512,
base=.5,
decay_type=decay_type,
scale_with_batchsize=True,
warmup_steps=5,
cooldown_steps=5,
**extra_kwargs)
lr = lr_fn(step)
self.assertAlmostEqual(lr, expected_lr)
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet input pipeline."""
import collections
import functools
import itertools
import math
import big_vision.datasets.core as ds_core
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import einops
import flax.jax_utils as flax_utils
import jax
import tensorflow as tf
def make_for_train(
data, preprocess_fn, batch_size,
shuffle_buffer_size, cache_raw=False, filter_fn=None,
num_parallel_calls=100, prefetch=2):
"""Makes an input pipeline for training."""
data = _add_tpu_host_options(data)
# Use data filtering at your own risk: the actual split sizes won't be known
# in advance, so many things can go wrong in the code.
if filter_fn:
data = data.filter(filter_fn)
data = data.cache() if cache_raw else data
data = data.repeat(None) # repeat data indefinitely
data = data.shuffle(shuffle_buffer_size) if shuffle_buffer_size else data
data = data.map(preprocess_fn, num_parallel_calls=num_parallel_calls)
# Drop remainder makes shape fully static, so we can later use it if needed.
if batch_size:
data = data.batch(batch_size // jax.process_count(), drop_remainder=True)
return data.prefetch(prefetch)
def training(input_config):
"""Reads the data from a single dataset, or mixes it from multiple.
The data is read either from one or mixed from multiple datasets, depending
on the `input_config`.
Args:
input_config: Configures the input pipeline. See input_pipeline_test for
examples.
Returns:
    A tuple containing a (possibly mixed) tf.data.Dataset and the total number
    of training examples.
"""
batch_size = input_config.batch_size
# Handle separately the common case when no mixing happens.
if isinstance(input_config.data.get("name"), str):
train_data = ds_core.get(**input_config.data)
train_ds = make_for_train(
data=train_data.get_tfdata(ordered=False),
batch_size=batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(input_config.get("pp")),
shuffle_buffer_size=input_config.get("shuffle_buffer_size"),
cache_raw=input_config.get("cache_raw", False),
filter_fn=input_config.get("filter_fn"),
)
return train_ds, train_data.total_examples
datasets = []
weights = []
ntraining_examples = 0
for dataset_name, weight in input_config.data.items():
dataset = input_config[dataset_name]
train_data = ds_core.get(**dataset.data)
ntraining_examples += train_data.total_examples
dataset = make_for_train(
data=train_data.get_tfdata(ordered=False),
# Don't batch the data just yet, it will be done after
# mixing the different datasets below.
batch_size=None,
preprocess_fn=pp_builder.get_preprocess_fn(dataset.get("pp")),
shuffle_buffer_size=dataset.get("shuffle_buffer_size"),
cache_raw=dataset.get("cache_raw", False),
filter_fn=dataset.get("filter_fn"),
)
datasets.append(dataset)
weights.append(weight)
# Normalize the weights such that they sum up to 1.
weights = [x / sum(weights) for x in weights]
train_ds = tf.data.Dataset.sample_from_datasets(
datasets, weights, stop_on_empty_dataset=True)
train_ds = train_ds.batch(
input_config["batch_size"] // jax.process_count(), drop_remainder=True)
return train_ds, ntraining_examples
# The pipeline below is used for evals in multi-{G,T}PU and multi-host settings.
# As the total number of examples may not be evenly divisible across all
# devices, we use the `infinite tf.data padding` trick, which was suggested by
# Andreas Steiner and also implemented by him in the clu library:
# https://github.com/google/CommonLoopUtils/blob/84b777c42dfd3fb6685537138433bfeb5241a006/clu/deterministic_data.py#L304.
def make_for_inference(
data, preprocess_fn, batch_size, num_ex_per_process,
cache_raw=False, cache_final=False):
"""Makes an input pipeline for inference."""
data = _add_tpu_host_options(data)
data = data.cache() if cache_raw else data
data = data.map(_add_mask(preprocess_fn), num_parallel_calls=100)
data = data.concatenate(_get_pad_data(data))
local_batch_size = batch_size // jax.process_count()
# This is just like `batch`, but allows batching elements of different shapes
# into a tf.RaggedTensor. Elements of the same fixed shape remain tf.Tensors.
# Since we do 'infinite' padding it is safe to drop the remainder.
data = data.apply(tf.data.experimental.dense_to_ragged_batch(
batch_size=local_batch_size, drop_remainder=True))
# We need to make sure that all hosts process all data and exactly the same
# number of batches. Below we take max per-host num examples and use it on all
# hosts to derive the number of batches.
num_batches = math.ceil(max(num_ex_per_process) / local_batch_size)
data = data.take(num_batches)
# Note we cache data after a finite number of batches is taken.
data = data.cache() if cache_final else data
data = data.repeat()
return data.prefetch(1), num_batches
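# Toy sketch (hypothetical) of the infinite-padding trick on a single process:
# 10 examples with batch size 4 yield ceil(10 / 4) = 3 batches, and `_mask` is
# 0 for the padded examples in the last batch.
def _demo_make_for_inference():
  data = tf.data.Dataset.from_tensor_slices({"x": tf.range(10)})
  ds, num_batches = make_for_inference(
      data, preprocess_fn=lambda ex: ex, batch_size=4,
      num_ex_per_process=[10])
  assert num_batches == 3
  return ds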
def _get_pad_data(data):
def zeros_like_spec(spec):
# For unknown/flexible dimensions (None), just use 0 instead.
return tf.zeros([x or 0 for x in spec.shape], spec.dtype)
zero = jax.tree_map(zeros_like_spec, data.element_spec)
return tf.data.Dataset.from_tensors(zero).repeat()
def _add_mask(pp_fn):
def _pp_fn(example):
return {"_mask": tf.constant(1), **pp_fn(example)}
return _pp_fn
def _add_tpu_host_options(data):
options = tf.data.Options()
options.threading.private_threadpool_size = 48
options.threading.max_intra_op_parallelism = 1
return data.with_options(options)
def prefetch_iterator(it, n):
"""Runs iterator `it` ahead for `n` steps. Adapted from flax."""
if not n:
yield from it
return
queue = collections.deque()
def enqueue(n_steps): # Enqueues *up to* `n` elements from the iterator.
for data in itertools.islice(it, n_steps):
queue.append(data)
enqueue(n) # Fill up the buffer.
while queue:
yield queue.popleft()
enqueue(1)
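# Minimal illustrative usage (hypothetical): prefetching changes when elements
# are pulled from the underlying iterator, not which elements come out.
def _demo_prefetch_iterator():
  assert list(prefetch_iterator(iter(range(5)), n=2)) == [0, 1, 2, 3, 4]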
def shard_and_put(x, shard=True, put=True):
# pylint: disable=protected-access
x = x._numpy() # avoids redundant copy when converting tf tensors to numpy.
if shard:
x = einops.rearrange(x, "(d l) ... -> d l ...", d=jax.local_device_count())
if shard and put: # Only works for pmap (for now).
x = jax.device_put_sharded(list(x), flax_utils._pmap_device_order())
return x
# pylint: enable=protected-access
def start_input_pipeline(data, n_prefetch=1, shard=True):
fn = functools.partial(shard_and_put, shard=shard, put=n_prefetch)
it = (jax.tree_util.tree_map(fn, elem) for elem in iter(data))
return prefetch_iterator(it, n_prefetch)
def start_ragged_input_pipeline(data, n_prefetch=1, shard=True, ragged=None):
def maybe_shard_and_put(name, x):
return x if name in (ragged or {}) else shard_and_put(x, shard)
it = (u.tree_map_with_names(maybe_shard_and_put, elem) for elem in iter(data))
return prefetch_iterator(it, n_prefetch)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optax."""
from absl.testing import absltest
from absl.testing import parameterized
from big_vision import optax as bv_optax
import chex
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
class OptaxTest(parameterized.TestCase):
def test_get_count(self):
params = jax.tree_map(jnp.array, {"a": 1.})
tx = optax.masked(
optax.scale_by_schedule(lambda step: step),
{"a": True},
)
opt_state = tx.init(params)
self.assertEqual(bv_optax.get_count(opt_state), 0)
_, opt_state = tx.update(params, opt_state)
self.assertEqual(bv_optax.get_count(opt_state), 1)
def test_split_frozen(self):
params = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 1., "bias": 2.},
}) # pyformat: disable
sched1 = dict(decay_type="cosine")
sched2 = dict(decay_type="linear")
schedule = [
(".*/kernel", sched1),
(".*/bias", sched2),
]
masks, scheds = bv_optax._make_mask_trees(params, schedule, log="schedule")
frozen_mask, masks, scheds = bv_optax._split_frozen(masks, scheds)
chex.assert_trees_all_equal(
frozen_mask,
{"Dense_0": {"kernel": False, "bias": False}},
) # pyformat: disable
chex.assert_trees_all_equal(
masks,
(
{"Dense_0": {"kernel": True, "bias": False}},
{"Dense_0": {"kernel": False, "bias": True}},
),
) # pyformat: disable
self.assertEqual(scheds, (sched1, sched2))
# freeze some
schedule = [
(".*/bias", None),
("Dense_0/.*", sched1),
(".*", None),
]
masks, scheds = bv_optax._make_mask_trees(params, schedule, log="schedule")
frozen_mask, masks, scheds = bv_optax._split_frozen(masks, scheds)
chex.assert_trees_all_equal(
frozen_mask,
{"Dense_0": {"kernel": False, "bias": True}},
) # pyformat: disable
chex.assert_trees_all_equal(
masks,
({"Dense_0": {"kernel": True, "bias": False}},),
) # pyformat: disable
self.assertEqual(scheds, (sched1,))
# does not cover all params - fails
schedule = [
(".*/kernel", None),
]
masks, scheds = bv_optax._make_mask_trees(params, schedule, log="schedule")
with self.assertRaisesRegex(AssertionError, "All params must be covered"):
_ = bv_optax._split_frozen(masks, scheds)
def test_replace_frozen(self):
params = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 1., "bias": 2.},
}) # pyformat: disable
schedule = [
(".*/kernel", {}),
(".*", None),
]
chex.assert_trees_all_equal(
bv_optax.replace_frozen(schedule, params, 0.),
{"Dense_0": {"kernel": 1., "bias": 0.}},
) # pyformat: disable
def test_make_simple(self):
params = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 1., "bias": 2.},
}) # pyformat: disable
config = ml_collections.ConfigDict()
config.lr = 0.01
config.schedule = dict(decay_type="linear")
config.optax_name = "scale"
config.optax = ml_collections.ConfigDict()
g_scale = 0.5
config.optax.step_size = g_scale
total_steps = 10
sched_kw = dict(global_batch_size=1, total_steps=total_steps)
tx, (schedule_fn,) = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
grads = jax.tree_map(jnp.ones_like, params)
for step in range(total_steps):
updates, opt_state = tx.update(grads, opt_state)
self.assertEqual(bv_optax.get_count(opt_state), step + 1)
sched = schedule_fn(step)
np.testing.assert_almost_equal(
sched, 1.0 / total_steps * (total_steps - step))
make_tx = lambda sched: lambda g: -sched * config.lr * g_scale * g
chex.assert_trees_all_close(updates, jax.tree_map(make_tx(sched), grads))
def test_make_wd(self):
params = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 1., "bias": 2., "other": 3.},
}) # pyformat: disable
wds = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 2e-3, "bias": 5e-4, "other": 0.},
}) # pyformat: disable
config = ml_collections.ConfigDict()
config.lr = 0.01
config.wd = 1e-3
config.wd_mults = [
(".*/kernel", 2.0),
(".*/bias", 0.5),
]
config.schedule = dict(decay_type="linear")
config.optax_name = "scale"
config.optax = ml_collections.ConfigDict()
g_scale = 0.5
config.optax.step_size = g_scale
total_steps = 10
sched_kw = dict(global_batch_size=1, total_steps=total_steps)
tx, (sched_fn,) = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
grads = jax.tree_map(jnp.ones_like, params)
for step in range(total_steps):
updates, opt_state = tx.update(grads, opt_state, params)
self.assertEqual(bv_optax.get_count(opt_state), step + 1)
sched = sched_fn(step)
np.testing.assert_almost_equal(
sched, 1.0 / total_steps * (total_steps - step))
def make_tx(sched):
def inner(p, g, wd):
return -sched * (config.lr * g_scale * g + p * wd)
return inner
chex.assert_trees_all_close(
updates, jax.tree_map(make_tx(sched), params, grads, wds))
def test_make_clip_norm(self):
params = jax.tree_map(jnp.array, {
"Dense_0": {"kernel": 1., "bias": 2., "other": 3.},
}) # pyformat: disable
config = ml_collections.ConfigDict()
config.lr = 0.01
config.schedule = dict(decay_type="linear")
config.optax_name = "scale"
config.grad_clip_norm = 1.0
config.optax = ml_collections.ConfigDict()
g_scale = 0.5
config.optax.step_size = g_scale
total_steps = 10
sched_kw = dict(global_batch_size=1, total_steps=total_steps)
tx, (sched_fn,) = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
grads = jax.tree_map(jnp.ones_like, params)
gflat = jax.tree_leaves(grads)
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in gflat]))
grad_clip_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
grads_scaled = jax.tree_map(lambda p: grad_clip_factor * p, grads)
for step in range(total_steps):
updates, opt_state = tx.update(grads, opt_state)
self.assertEqual(bv_optax.get_count(opt_state), step + 1)
sched = sched_fn(step)
np.testing.assert_almost_equal(
sched, 1.0 / total_steps * (total_steps - step))
make_tx = lambda sched: lambda g: -sched * config.lr * g_scale * g
chex.assert_trees_all_close(updates,
jax.tree_map(make_tx(sched), grads_scaled))
def test_make_multi(self):
params = jax.tree_map(
jnp.array, {
"Dense_0": {"kernel": 1.0, "bias": 2.0, "other": 3.0},
"Dense_1": {"kernel": 4.0, "bias": 5.0, "other": 6.0},
"Dense_2": {"kernel": 7.0, "bias": 8.0, "other": 9.0},
"Dense_3": {"kernel": 10., "bias": 11., "other": 12.},
}) # pyformat: disable
# Manually specify lr + wd for computing expected values.
lrb = 0.01
lr1 = 2.0
lr2 = 0.5
lr_mults = {
"Dense_0": {"kernel": lr1, "bias": lr1, "other": lr1},
"Dense_1": {"kernel": lr2, "bias": lr2, "other": lr2},
"Dense_2": {"kernel": 1.0, "bias": 1.0, "other": 1.0},
"Dense_3": {"kernel": 1.0, "bias": 1.0, "other": 1.0},
} # pyformat: disable
wdb = 1e-3
wd1 = 10.0
wd2 = 0.1
wds = jax.tree_map(
jnp.array, {
"Dense_0": {"kernel": wd1 * wdb, "bias": wd2 * wdb, "other": 0.},
"Dense_1": {"kernel": wd1 * wdb, "bias": wd2 * wdb, "other": 0.},
"Dense_2": {"kernel": wd1 * wdb, "bias": wd2 * wdb, "other": 0.},
"Dense_3": {"kernel": 0.0 * wdb, "bias": 0.0 * wdb, "other": 0.},
}) # pyformat: disable
config = ml_collections.ConfigDict()
config.lr = lrb
config.lr_mults = [
("Dense_0/.*", lr1),
("Dense_1/.*", lr2),
]
config.wd = wdb
config.wd_mults = [
(".*/kernel", wd1),
(".*/bias", wd2),
]
mult1 = 1.0
mult2 = 0.1
config.schedule = [
("Dense_0/.*", dict(decay_type="linear", mult=mult1, linear_end=mult1)),
("Dense_[12]/.*", dict(decay_type="linear", mult=mult2)),
(".*", None),
]
config.optax_name = "scale"
config.grad_clip_norm = 1.0
config.optax = ml_collections.ConfigDict()
g_scale = 0.5
config.optax.step_size = g_scale
total_steps = 10
sched_kw = dict(global_batch_size=1, total_steps=total_steps)
tx, (sched_fn1,
sched_fn2) = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
# Manually specify schedules for computing expected values.
frozen_fn = lambda _: jnp.array(0.)
sched_fns = {
"Dense_0": {"kernel": sched_fn1, "bias": sched_fn1, "other": sched_fn1},
"Dense_1": {"kernel": sched_fn2, "bias": sched_fn2, "other": sched_fn2},
"Dense_2": {"kernel": sched_fn2, "bias": sched_fn2, "other": sched_fn2},
"Dense_3": {"kernel": frozen_fn, "bias": frozen_fn, "other": frozen_fn},
} # pyformat: disable
grads = jax.tree_map(jnp.ones_like, params)
gflat, _ = jax.tree_flatten(
# Don't count frozen params towards gradient norm.
jax.tree_map(lambda g, sched_fn: {frozen_fn: 0}.get(sched_fn, g),
grads, sched_fns))
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in gflat]))
grad_clip_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
grads_scaled = jax.tree_map(lambda p: grad_clip_factor * p, grads)
def make_tx(step):
def get_update(p, g, wd, sched_fn, lr_mult):
return -sched_fn(step) * (lrb * lr_mult * g_scale * g + p * wd)
return get_update
for step in range(total_steps):
updates, opt_state = tx.update(grads, opt_state, params)
self.assertEqual(bv_optax.get_count(opt_state), step + 1)
sched1, sched2 = sched_fn1(step), sched_fn2(step)
np.testing.assert_almost_equal(sched1, mult1)
np.testing.assert_almost_equal(sched2,
mult2 * (total_steps - step) / total_steps)
chex.assert_trees_all_close(
updates,
jax.tree_map(
make_tx(step), params, grads_scaled, wds, sched_fns, lr_mults))
def test_frozen_no_state(self):
params = {"small": jnp.zeros([1]), "large": jnp.zeros([1000])}
config = ml_collections.ConfigDict()
config.lr = 0.01
config.schedule = [
("small", dict(decay_type="cosine")),
("large", None),
]
config.optax_name = "scale_by_adam"
sched_kw = dict(global_batch_size=1, total_steps=1)
tx, _ = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
adam_state = bv_optax.find_states(opt_state, optax.ScaleByAdamState)
nbytes = sum(
jax.tree_flatten(jax.tree_map(lambda x: x.nbytes, adam_state))[0])
self.assertLess(nbytes, 1_000)
def test_adafactor(self):
params = {"Dense_0": {"kernel": jnp.zeros([1024, 1024])}}
config = ml_collections.ConfigDict()
config.optax_name = "big_vision.scale_by_adafactor"
config.lr = 0.01
config.schedule = dict(decay_type="linear")
sched_kw = dict(global_batch_size=1, total_steps=1)
tx, _ = bv_optax.make(config, params, sched_kw=sched_kw)
opt_state = tx.init(params)
adafactor_state = bv_optax.find_states(opt_state, optax.FactoredState)
n_state_params = sum(
jax.tree_flatten(
jax.tree_map(lambda x: np.prod(
x.shape if hasattr(x, "shape") else 0), adafactor_state))[0])
self.assertEqual(n_state_params, 2 * 1024 + 2)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gradient transformations and other optax utilities."""
import operator
import big_vision.utils as u
import jax
import jax.numpy as jnp
import optax
def find_states(opt_state, cls):
leaves = jax.tree_util.tree_leaves(
opt_state, is_leaf=lambda node: isinstance(node, cls))
return [leaf for leaf in leaves if isinstance(leaf, cls)]
def get_count(opt_state):
"""Returns `ScaleByScheduleState.count` from `opt_state` as an integer."""
counts = {
int(state.count)
for state in find_states(opt_state, optax.ScaleByScheduleState)
}
assert len(counts) == 1, f"Expected exactly 1 ScaleByScheduleState: {counts}"
return next(iter(counts))
def replace_frozen(schedule, pytree, replacement, log=None):
"""Replaces values matching frozen params in `pytree` with `replacement`."""
if not isinstance(schedule, (list, tuple)):
return pytree
masks, scheds = _make_mask_trees(pytree, schedule, log=log)
frozen_mask, _, _ = _split_frozen(masks, scheds)
return jax.tree_map(
lambda v, f: replacement if f else v, pytree, frozen_mask)
def make(config, params, *, sched_kw):
"""Returns gradient transform and learning rate functions."""
# Global schedule. No schedule means frozen.
schedule = config.schedule
if not isinstance(schedule, (tuple, list)):
schedule = [(".*", schedule)]
masks, scheds = _make_mask_trees(params, schedule, "config.schedule")
frozen_mask, masks, scheds = _split_frozen(masks, scheds)
not_frozen_mask = jax.tree_map(operator.not_, frozen_mask)
def create_schedule(mult=1.0, **kw):
assert "base" not in kw, kw
return u.create_learning_rate_schedule(base=mult, **kw)
schedule_fns = [create_schedule(**sched_kw, **sched) for sched in scheds]
schedule_txs = [
optax.masked(optax.scale_by_schedule(schedule_fn), mask)
for schedule_fn, mask in zip(schedule_fns, masks)
] + [
# Removes weight decay updates. Note that weight decay already has an
# independent mask (which cannot be combined easily with a second mask),
# so instead we multiply updates for frozen params with zero.
optax.masked(optax.set_to_zero(), frozen_mask)
]
# Gradient clipping.
grad_clip_norm_tx = (
optax.masked(optax.clip_by_global_norm(config.grad_clip_norm),
not_frozen_mask)
if config.get("grad_clip_norm") else optax.identity())
# Optimizer updates.
tx_func = operator.attrgetter(config.optax_name)(optax)
opt_txs = [optax.masked(tx_func(**config.get("optax", {})), not_frozen_mask)]
assert "optim" not in config, "Deprecated option, use config.optax."
# Learning rate multipliers. Defaults to 1.0.
lr_mult_txs = [optax.scale(config.lr)]
if config.get("lr_mults"):
masks, mults = _make_mask_trees(params, config.lr_mults, "config.lr_mults")
assert all(mult > 0 for mult in mults), (
f"Use schedule=None for parameter freezing instead of lr_mults={mults}")
lr_mult_txs += [
optax.masked(optax.scale(mult), mask)
for mult, mask in zip(mults, masks)
]
# Weight decay. Defaults to 0.0.
# Weight decay is not gradient-based but instead uses "params side-input".
# Hence, weight decay is additive and independent of previous gradient-based
# updates.
assert "weight_decay" not in config, "Deprecated option. Use wd and schedule."
assert config.get("weight_decay_decouple", True), (
"Coupled weight decay not supported anymore.")
if config.get("wd"):
wd_mults = config.get("wd_mults", [(".*/kernel$", 1.0)])
masks, mults = _make_mask_trees(params, wd_mults, "config.wd_mults")
weight_decay_txs = [
optax.add_decayed_weights(config.wd * mult, mask)
for mult, mask in zip(mults, masks)
]
else:
weight_decay_txs = []
# Combine gradient updates and learning rate schedules.
return optax.chain(
grad_clip_norm_tx,
*opt_txs,
*lr_mult_txs,
*weight_decay_txs,
*schedule_txs,
optax.scale(-1.0)), schedule_fns
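# A hedged usage sketch for `make` (illustration only; the config values below
# are made up). `make` expects a ConfigDict-like object with `lr`, `schedule`
# and `optax_name` entries:
#
#   import ml_collections
#   config = ml_collections.ConfigDict()
#   config.lr = 0.01
#   config.schedule = dict(decay_type="cosine", warmup_steps=10)
#   config.optax_name = "scale_by_adam"
#   tx, sched_fns = make(config, params,
#                        sched_kw=dict(global_batch_size=128, total_steps=100))
#   opt_state = tx.init(params)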
def _make_mask_trees(params, patterns_values, log):
patterns, values = zip(*patterns_values)
masks = u.make_mask_trees(params, patterns, log=log)
return masks, values
def _split_frozen(masks, scheds):
"""Computes `frozen_mask` and updates `masks` and `scheds`."""
# Specifying `None` as a scheduler freezes params.
all_false = jax.tree_map(lambda *bools: not any(bools), *masks)
assert not any(jax.tree_flatten(all_false)[0]), (
f"All params must be covered (use `None` for freezing): {all_false}")
frozen_masks = [
mask for mask, sched in zip(masks, scheds) if sched is None]
frozen_mask = jax.tree_map(
lambda *bools: any(bools), *frozen_masks,
all_false) # `all_false` is required when `frozen_masks==[]`.
masks, scheds = zip(*(
(mask, sched) for mask, sched in zip(masks, scheds) if sched is not None))
return frozen_mask, masks, scheds
############ Custom BigVision optimizers #######################################
# Currently there's only one custom optimizer, and since we don't foresee new
# ones in the near future, we opt not to create a new optimizer folder/module
# for just one isolated case. If there are more optimizers later, we can
# consider moving them into individual files in a subfolder.
# A dummy object to allow for foo.bar access syntax, see
# https://stackoverflow.com/a/19476841/2366315
optax.big_vision = type("", (), {})()
def scale_by_adafactor(min_dim_size_to_factor=32,
decay_rate=0.8, decay_offset=0,
beta2_cap=0.999,
clipping_threshold=None,
momentum=0.9, dtype_momentum=jnp.bfloat16,
eps=1e-30):
"""The BigVision variant of Adafactor optimizer."""
def _decay_rate_pow(i, exponent):
"""Second-order moment decay schedule."""
t = jnp.array(i, jnp.float32) + 1.0
return jnp.minimum(beta2_cap, 1.0 - t**(-exponent))
scale_by_rms = optax.scale_by_factored_rms(
factored=True,
decay_rate=decay_rate,
step_offset=decay_offset,
min_dim_size_to_factor=min_dim_size_to_factor,
epsilon=eps,
decay_rate_fn=_decay_rate_pow)
clip = (optax.clip_by_block_rms(clipping_threshold) if clipping_threshold
else optax.identity())
mom = (optax.ema(momentum, debias=False, accumulator_dtype=dtype_momentum)
if momentum else optax.identity())
return optax.chain(scale_by_rms, clip, mom)
optax.big_vision.scale_by_adafactor = scale_by_adafactor # pytype: disable=module-attr
# A few more aliases we use frequently:
def momentum_hp(momentum=0.9, dtype=jnp.bfloat16, nesterov=False):
"""SGD-Momentum with half-precision accumulator."""
return optax.trace(decay=momentum, accumulator_dtype=dtype, nesterov=nesterov)
optax.big_vision.momentum_hp = momentum_hp # pytype: disable=module-attr
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils very specific to this project, not generic."""
import collections
import contextlib
import dataclasses
import functools
import io
import json
import multiprocessing
import multiprocessing.pool
import os
import re
import sys
import time
from typing import Mapping
from absl import flags
from absl import logging
from big_vision.pp import registry as pp_registry
import einops
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import ml_collections as mlc
import numpy as np
import tensorflow as tf  # Needed below for `tf.errors.OpError`.
import tensorflow.io.gfile as gfile  # pylint: disable=consider-using-from-import
Registry = pp_registry.Registry
# pylint: disable=logging-fstring-interpolation
def pad_shard_unpad(wrapped, static_argnums=(0,), static_argnames=()):
"""Wraps a function with code that pads, shards, then un-shards, un-pads.
Args:
    wrapped: the function to be wrapped. Signature is `params, *args, **kwargs`.
static_argnums: indices of arguments to `wrapped` that should _not_ be
padded and sharded, but instead be forwarded as-is. The default is (0,)
because by far the most common use-case is to pass `params` first.
static_argnames: names of kwargs to `wrapped` that should _not_ be padded
and sharded, but instead be forwarded as-is.
Returns:
A new function that pads and shards its arguments before passing them to
the wrapped function, and un-shards and un-pads the returned pytree.
This is useful for calling a pmap'ed function with inputs that aren't
divisible by the number of devices. A typical use is:
@pad_shard_unpad
@jax.pmap
def forward(params, x): ...
Notes:
The padding is done in host-memory before being passed to the function, and
the values returned by the function are transferred back to host memory.
The returned function is augmented with a new keyword-only argument
`min_device_batch` that, if specified, forces padding inputs to at least
this size per device. This can be useful to avoid recompiles for the last
batch and reduce memory fragmentation.
"""
def pad_shard_unpad_wrapper(*args, min_device_batch=None, **kw):
d = jax.local_device_count() # d = devices, b = batch
batch_sizes = (
{a.shape[0] for i, a in enumerate(args) if i not in static_argnums} |
{v.shape[0] for k, v in kw.items() if k not in static_argnames})
assert len(batch_sizes) == 1, f"Inconsistent batch-sizes: {batch_sizes}"
b = batch_sizes.pop()
def maybe_pad(x, actually_pad=True):
if not actually_pad: return x # For call-site convenience below.
_, *shape = x.shape
db, rest = divmod(b, d)
if rest:
x = np.concatenate([x, np.zeros((d - rest, *shape), x.dtype)], axis=0)
db += 1
if min_device_batch and db < min_device_batch:
x = np.concatenate(
[x, np.zeros((d * (min_device_batch - db), *shape), x.dtype)])
db = min_device_batch
return x.reshape(d, db, *shape)
args = [maybe_pad(a, i not in static_argnums) for i, a in enumerate(args)]
kw = {k: maybe_pad(v, k not in static_argnames) for k, v in kw.items()}
out = wrapped(*args, **kw)
def unpad(x):
# Transfer back before cutting, to reduce on-device shape diversity.
return einops.rearrange(jax.device_get(x), "d b ... -> (d b) ...")[:b]
return jax.tree_util.tree_map(unpad, out)
return pad_shard_unpad_wrapper
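def _demo_pad_shard_unpad():
  # A hedged, runnable toy (not used by the library): the identity "model"
  # makes the pad -> shard -> unshard -> unpad round-trip visible.
  fn = pad_shard_unpad(lambda params, x: x)
  x = np.arange(10, dtype=np.float32).reshape(10, 1)
  y = fn(None, x)  # Padded up to a multiple of local_device_count, then cut.
  assert y.shape == (10, 1)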
def onehot(labels, num_classes, on_value=1.0, off_value=0.0):
x = (labels[..., None] == jnp.arange(num_classes)[None])
x = jax.lax.select(x, jnp.full(x.shape, on_value),
jnp.full(x.shape, off_value))
return x.astype(jnp.float32)
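# A hedged illustration of `onehot` (not part of the original file):
#
#   onehot(jnp.array([0, 2]), num_classes=3)
#   # -> [[1., 0., 0.],
#   #     [0., 0., 1.]]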
def npload(fname):
"""Loads `fname` and returns an np.ndarray or dict thereof."""
# Load the data; use local paths directly if possible:
if os.path.exists(fname):
loaded = np.load(fname, allow_pickle=False)
else:
# For other (remote) paths go via gfile+BytesIO as np.load requires seeks.
with gfile.GFile(fname, "rb") as f:
data = f.read()
loaded = np.load(io.BytesIO(data), allow_pickle=False)
# Support loading both single-array files (np.save) and zips (np.savez).
if isinstance(loaded, np.ndarray):
return loaded
else:
return dict(loaded)
def load_checkpoint(tree, npz):
"""Loads a jax pytree from a npz file.
Args:
tree: deprecated, use None.
Bwd-compat for old format that only stored values: the pytree structure.
npz: Either path to the checkpoint file (.npz), or a dict-like.
Returns:
A pytree that is the checkpoint.
"""
if isinstance(npz, str): # If not already loaded, then load.
npz = npload(npz)
keys, values = zip(*list(npz.items()))
if tree:
checkpoint = tree.unflatten(values)
else:
checkpoint = recover_tree(keys, values)
return checkpoint
def load_params(tree, npz):
  """Loads parameters from an npz checkpoint.
Args:
tree: deprecated, use None.
Bwd-compat for old format that only stored values: the pytree structure.
npz: Either path to the checkpoint file (.npz), or a dict-like.
Returns:
A pytree that is the checkpoint.
Notes:
The filename can contain an indicator like `/path/to/file.npz:keyname`, in
which case ["opt"]["params"]["keyname"] will become ["opt"]["params"] in
the returned checkpoint. This allows ANY model that uses this function to
load itself from a checkpoint that contains multiple sub-models, such as
checkpoints generated from image_text or Distillation trainers.
"""
key = None # Whether we want to extract only a sub-key of the model.
if isinstance(npz, str):
if ((":" in npz and "://" not in npz) or # Like /path/to/file:subtree_name
("://" in npz and npz.count(":") == 2)): # Like gs://path/to/file:sub
npz, key = npz.rsplit(":", 1)
checkpoint = load_checkpoint(tree, npz)
if "params" in checkpoint:
# Checkpoint with optax state (after cl/423007216).
params = checkpoint["params"]
elif "opt" in checkpoint:
# Checkpoint with Flax optimizer.
params = checkpoint["opt"]["target"]
else:
# When open-sourcing, we usually shared only the params directly.
params = checkpoint
if key is not None:
params = tree_get(params, key)
return params
def prefetch_scalar(it, nprefetch=1, devices=None):
n_loc_dev = len(devices) if devices else jax.local_device_count()
repl_iter = (np.ones(n_loc_dev) * i for i in it)
return flax_utils.prefetch_to_device(repl_iter, nprefetch, devices)
def sigmoid_xent(*, logits, labels, reduction=True):
# NOTE: This implementation is stable, see these two:
# (internal link)
# https://github.com/google/jax/issues/2140
log_p = jax.nn.log_sigmoid(logits)
log_not_p = jax.nn.log_sigmoid(-logits)
nll = -jnp.sum(labels * log_p + (1. - labels) * log_not_p, axis=-1)
return jnp.mean(nll) if reduction else nll
def bidirectional_contrastive_loss(zimg, ztxt, t, mask=None, reduction=False):
"""Bidirectional contrastive loss (e.g. for contrastive trainer/evaluator)."""
# BF.FB = BB
logits = jnp.dot(zimg, ztxt.T) * t
if mask is not None:
# Set to negative infinity where mask = 0. Masked examples will disappear
# under softmax, and be ignored by ncorrect (NINF will never win argmax).
exclude = jnp.logical_not(mask) # Now 1 if we don't want to keep.
exclude = jnp.logical_or(exclude[:, None], exclude[None, :])
logits = jnp.where(exclude, jnp.NINF, logits)
# Note: assumed t is in a good range e.g. already passed through exp/softplus.
l1 = -jnp.diag(jax.nn.log_softmax(logits, axis=1)) # NLL img->txt
l2 = -jnp.diag(jax.nn.log_softmax(logits, axis=0)) # NLL txt->img
l = 0.5 * (l1 + l2)
if mask is not None:
l = jnp.where(mask, l, 0)
redux = jnp.mean if reduction else lambda x: x
if reduction and mask is not None:
redux = lambda x: jnp.sum(x * mask) / (jnp.sum(mask) + 1e-8)
# Also return extra measurements.
return redux(l), {
"ncorrect": redux(jnp.argmax(logits, axis=1) == jnp.arange(len(logits))),
}
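def _demo_contrastive_loss():
  # A hedged illustration (not used by the library): with identical image and
  # text embeddings every diagonal entry wins, so the fraction correct is 1.0.
  z = jnp.eye(4)  # 4 pairs of already-normalized, perfectly aligned features.
  loss, extras = bidirectional_contrastive_loss(z, z, t=10.0, reduction=True)
  assert loss.shape == ()
  assert float(extras["ncorrect"]) == 1.0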
def softmax_xent(*, logits, labels, reduction=True, kl=False, axis=-1):
log_p = jax.nn.log_softmax(logits, axis=axis)
nll = -jnp.sum(labels * log_p, axis=axis)
if kl:
nll += jnp.sum(labels * jnp.log(jnp.clip(labels, 1e-8)), axis=axis)
return jnp.mean(nll) if reduction else nll
def weighted_softmax_xent(*,
logits,
labels,
reduction=True,
weights=None,
label_smoothing=0.0,
normalize=True):
"""Compute weighted cross entropy.
Args:
logits: [batch, length, num_classes] float array.
labels: categorical targets [batch, length] int array.
reduction: reduce across batch dim.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
normalize: normalize each "sentence" loss by the number of tokens in it.
  Returns:
    The mean loss if `reduction` is set, otherwise the per-sentence loss.
"""
if logits.ndim != labels.ndim + 1:
raise ValueError("Incorrect shapes. Got shape %s logits and %s targets" %
(str(logits.shape), str(labels.shape)))
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
soft_targets = onehot(
labels, vocab_size, on_value=confidence, off_value=low_confidence)
loss = -jnp.sum(soft_targets * jax.nn.log_softmax(logits), axis=-1)
normalizing_factor = labels.shape[1]
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum(axis=1)
loss = loss.sum(axis=1)
if normalize:
loss = loss / normalizing_factor
return loss.mean() if reduction else loss
def accumulate_gradient(loss_and_grad_fn, params, images, labels, accum_steps):
"""Accumulate gradient over multiple steps to save on memory."""
# See (internal link) for details and experiments.
if accum_steps and accum_steps > 1:
assert images.shape[0] % accum_steps == 0, (
f"Bad accum_steps {accum_steps} for batch size {images.shape[0]}")
step_size = images.shape[0] // accum_steps
l, g = loss_and_grad_fn(params, images[:step_size], labels[:step_size])
def acc_grad_and_loss(i, l_and_g):
imgs = jax.lax.dynamic_slice(images, (i*step_size, 0, 0, 0),
(step_size,) + images.shape[1:])
lbls = jax.lax.dynamic_slice(labels, (i*step_size, 0),
(step_size, labels.shape[1]))
li, gi = loss_and_grad_fn(params, imgs, lbls)
l, g = l_and_g
return (l + li, jax.tree_util.tree_map(lambda x, y: x + y, g, gi))
l, g = jax.lax.fori_loop(1, accum_steps, acc_grad_and_loss, (l, g))
return jax.tree_util.tree_map(lambda x: x / accum_steps, (l, g))
else:
return loss_and_grad_fn(params, images, labels)
def itstime(step, every_n_steps, total_steps, host=None, last=True, first=True,
drop_close_to_last=0.25):
"""Returns True if it's time to execute an action.
Args:
step: the current step representing "now".
every_n_steps: the action should run every this many steps.
total_steps: the step number of the last step of training.
host: host number. If provided, only run if we are this process.
last: whether to run on the last step or not.
first: whether to run on the first step or not.
drop_close_to_last: if a step would run, but is this close (in terms of
fraction of every_n_step) to the last one, skip.
Returns:
True if the action should be executed, False if not.
"""
# This logic avoids running `itstime` "a few" steps before the last step.
# Canonical example: don't save checkpoint 2 steps before the last, and then
# at the last again; it's pointless and checkpoint timing will time out.
close_to_last = False
if drop_close_to_last and every_n_steps:
close_to_last = abs(step - total_steps) < drop_close_to_last * every_n_steps
is_host = host is None or jax.process_index() == host
is_step = every_n_steps and (step % every_n_steps == 0) and not close_to_last
is_last = every_n_steps and step == total_steps
is_first = every_n_steps and step == 1
return is_host and (is_step or (last and is_last) or (first and is_first))
def checkpointing_timeout(writer, timeout):
# Make sure checkpoint writing is not a bottleneck
if writer is not None:
try:
writer.get(timeout=timeout)
except multiprocessing.TimeoutError as e:
      raise TimeoutError(
          "Checkpoint writing seems to be a bottleneck. Make sure you do "
          "not do something wrong, like writing checkpoints to a distant "
          "cell. In case you are OK with checkpoint writing being a "
          "bottleneck, you can configure the `ckpt_timeout` parameter.") from e
def hms(s):
"""Format time in hours/minutes/seconds."""
if s < 60:
return f"{s:.0f}s"
m, s = divmod(s, 60)
if m < 60:
return f"{m:.0f}m{s:.0f}s"
h, m = divmod(m, 60)
return f"{h:.0f}h{m:.0f}m" # Seconds intentionally omitted.
class Chrono:
"""Measures time and reports progress, hyper-specific to our train loops.
Some concepts:
1. This differentiates between three "types" of time:
- training time: the time spent on actual training (fprop/bprop/update)
- program time: overall time the program runs, including all overheads
- pause time: the chronometer can be paused (eg during evals).
2. This handles a "warmup": the first step is skipped for training time
purposes, as it includes significant compilation overheads, which distort
estimates.
  3. `accum`ulates (i.e. integrates) timings, and saves/loads them across
     restarts.
"""
def __init__(self):
self._timing_history = collections.defaultdict(list)
self._measure = None
self._write_note = None
self.program_start_time = time.monotonic()
self.train_start_time = None
self.train_start_step = None # When we started timing (after warmup)
self.prev_time = None
self.prev_step = None
self.pause_start = None
self.paused_time = 0
self.total_steps = None
self.global_bs = None
self.steps_per_epoch = None
self.warmup = 2 # How many calls to `tick` to skip.
self.load() # Inits accum integrators.
self.note = "Chrono n/a"
def inform(self, *, first_step=None, total_steps=None, global_bs=None,
steps_per_epoch=None, measure=None, write_note=None):
"""Provide some extra info that's only known later in the program."""
# The pattern of `self.x = x or self.x` allows one to call `inform` various
# times with various subset of information (args), as they become available.
# Except for `first_step` which can be 0 so is a bit more verbose.
self.prev_step = first_step if first_step is not None else self.prev_step
self.total_steps = total_steps or self.total_steps
self.steps_per_epoch = steps_per_epoch or self.steps_per_epoch
self.global_bs = global_bs or self.global_bs
self._measure = measure or self._measure
self._write_note = write_note or self._write_note
if self.total_steps and self.prev_step is not None:
self.note = (f"Steps:{self.prev_step}/{self.total_steps} "
f"[{self.prev_step/self.total_steps:.1%}]")
def tick(self, step, measure=None, write_note=None):
"""A chronometer tick."""
if step == self.prev_step: return # Can happen from evals for example.
measure = measure or self._measure
write_note = write_note or self._write_note
now = time.monotonic()
measure("uptime", now - self.program_start_time)
self.flush_timings()
# We do always count examples, regardless of the timing-related warmup that
# happens a few lines below.
ds = step - self.prev_step # Steps between ticks
self.prev_step = step
self.accum_examples_seen += ds * self.global_bs
measure("examples_seen", self.accum_examples_seen)
measure("progress", step / self.total_steps)
if self.steps_per_epoch:
measure("epoch", step / self.steps_per_epoch)
# We take the start as the second time `tick` is called, so we avoid
# measuring the overhead of compilation and don't include it in time
# estimates.
if self.warmup > 1:
self.warmup -= 1
write_note(self.note) # This can help debugging.
return
if self.warmup == 1:
self.train_start_time = self.prev_time = now
self.train_start_step = step
self.accum_program_time += now - self.program_start_time
self.paused_time = 0 # Drop pauses that happened before timing starts.
self.warmup = 0
write_note(self.note) # This can help debugging.
return
# Measurement with micro-timings of current training steps speed.
# Time between ticks (ignoring pause)
dt = now - self.prev_time - self.paused_time
ncores = jax.device_count() # Global device count
measure("img/sec/core", self.global_bs * ds / dt / ncores)
# Accumulate (integrate) times, good for plots.
self.accum_train_time += dt
self.accum_pause_time += self.paused_time
self.accum_program_time += dt + self.paused_time
# Convert to, and log as, core hours.
core_hours = self.accum_train_time * ncores / 60 / 60
devtype = jax.devices()[0].device_kind
measure(f"core_hours_{devtype}", core_hours)
measure("core_hours", core_hours) # For convenience as x-axis in sweeps.
# Progress note with "global" full-program average timings
# (eg in program-time minus warmup)
dt = now - self.train_start_time # Time elapsed since end of warmup.
steps_timed = step - self.train_start_step
steps_todo = self.total_steps - step
self.note = f"Steps:{step}/{self.total_steps} [{step/self.total_steps:.1%}]"
self.note += f"\nWalltime:{hms(self.accum_program_time)}"
self.note += f" ({hms(self.accum_pause_time)} eval)"
self.note += f"\nETA:{hms(dt / steps_timed * steps_todo)}"
self.note += f"\nTotal train time:{hms(dt / steps_timed * self.total_steps)}"
write_note(self.note)
self.prev_time = now
self.paused_time = 0
def pause(self, wait_for=()):
assert self.pause_start is None, "Don't pause twice."
jax.block_until_ready(wait_for)
self.pause_start = time.monotonic()
def resume(self):
self.paused_time += time.monotonic() - self.pause_start
self.pause_start = None
def save(self):
return dict(
accum_program_time=self.accum_program_time,
accum_train_time=self.accum_train_time,
accum_pause_time=self.accum_pause_time,
accum_examples_seen=self.accum_examples_seen,
)
def load(self, ckpt={}): # pylint: disable=dangerous-default-value
self.accum_program_time = ckpt.get("accum_program_time", 0.0)
self.accum_train_time = ckpt.get("accum_train_time", 0.0)
self.accum_pause_time = ckpt.get("accum_pause_time", 0.0)
self.accum_examples_seen = ckpt.get("accum_examples_seen", 0)
@contextlib.contextmanager
def log_timing(self, name, *, noop=False):
"""Use this when you time sth once per step and want instant flushing."""
t0 = time.monotonic()
yield
dt = time.monotonic() - t0
if not noop:
self._measure(name, dt)
logging.info("TIMING[%s]: %s", name, dt)
logging.flush()
@contextlib.contextmanager
def log_timing_avg(self, name, *, noop=False):
"""Use this when you time sth multiple times per step (eg in a loop)."""
t0 = time.monotonic()
yield
dt = time.monotonic() - t0
if not noop:
self._timing_history[name].append(dt)
logging.info("TIMING[%s]: avg %s current %s",
name, np.mean(self._timing_history[name]), dt)
logging.flush()
def flush_timings(self):
for name, times in self._timing_history.items():
self._measure(name, np.mean(times))
self._timing_history.clear()
# Singleton to use from everywhere. https://stackoverflow.com/a/6760726/2366315
chrono = Chrono()
def _traverse_with_names(tree, with_inner_nodes=False):
"""Traverses nested dicts/dataclasses and emits (leaf_name, leaf_val)."""
if dataclasses.is_dataclass(tree):
tree = flax.serialization.to_state_dict(tree)
  # Don't output the non-leaf nodes. If the optimizer doesn't have a state
  # the tree leaves can be Nones, which were interpreted as leaves by this
  # function but not by the other functions (like jax.tree_util.tree_map).
if tree is None:
return
elif isinstance(tree, Mapping):
keys = sorted(tree.keys())
for key in keys:
for path, v in _traverse_with_names(tree[key], with_inner_nodes):
yield (key + "/" + path).rstrip("/"), v
if with_inner_nodes:
yield "", tree
elif isinstance(tree, (list, tuple)):
for idx in range(len(tree)):
for path, v in _traverse_with_names(tree[idx], with_inner_nodes):
yield (str(idx) + "/" + path).rstrip("/"), v
if with_inner_nodes:
yield "", tree
else:
yield "", tree
def tree_flatten_with_names(tree):
"""Populates tree_flatten with leaf names.
  This function populates the output of tree_flatten with leaf names, using a
  custom traversal that produces names. The custom traversal does NOT have to
  traverse the tree in the same order as jax, as we take care of automatically
  aligning jax's and the custom traversals.
Args:
tree: python tree.
Returns:
A list of values with names: [(name, value), ...]
"""
vals, tree_def = jax.tree_flatten(tree)
# "Fake" token tree that is use to track jax internal tree traversal and
# adjust our custom tree traversal to be compatible with it.
tokens = range(len(vals))
token_tree = tree_def.unflatten(tokens)
val_names, perm = zip(*_traverse_with_names(token_tree))
inv_perm = np.argsort(perm)
  # Custom traversal should visit the same number of leaves.
assert len(val_names) == len(vals)
return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def
def tree_unflatten(names_and_vals):
"""Reverses `tree_flatten_with_names(tree)[0]`."""
return recover_tree(*zip(*names_and_vals))
def tree_map_with_names(f, tree, *rest):
"""Like jax.tree_util.tree_map but with a filter on the leaf path name.
Args:
f: A function with first parameter `name` (path-like "a/b/c") and remaining
parameters values of `tree` and `*rest` corresponding to the given `name`
Should return a new value for parameter `name`.
tree: The tree of parameters `f` should be applied to.
*rest: more trees of the exact same structure.
Returns:
A tree identical in structure to `tree` and `*rest` but with the leaves the
result of calling `f` on corresponding name/leaves in `tree` and `*rest`.
"""
names_and_vals, tree_def = tree_flatten_with_names(tree)
names, vals = zip(*names_and_vals)
rest_vals = [list(zip(*tree_flatten_with_names(t)[0]))[1] for t in rest]
vals = [f(*name_and_vals) for name_and_vals in zip(names, vals, *rest_vals)]
return tree_def.unflatten(vals)
def tree_map_with_regex(f, tree, regex_rules, not_f=lambda x: x, name=None):
"""Apply jax-style tree_map based on regex rules.
Args:
f: a function that is being applied to every variable.
tree: jax tree of arrays.
    regex_rules: a list of tuples `(pattern, args)`, where `pattern` is a
      regex used for variable matching and `args` are positional arguments
      passed to `f`. If some variable is not matched, we apply `not_f`, which
      is the identity by default. If multiple patterns match, then only the
      first rule is applied.
    not_f: optional function which is applied to variables that do not match
      any pattern.
name: a name of transform for logging purposes.
Returns:
a tree, transformed by `f` according to the given rules.
"""
def _f(vname, v):
for pattern, arg in regex_rules:
if re.fullmatch(pattern, vname):
if name and jax.process_index() == 0:
logging.info("Applying %s to %s with %s due to `%s`",
name, vname, arg, pattern)
return f(v, arg)
return not_f(v)
return tree_map_with_names(_f, tree)
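# A hedged usage sketch for `tree_map_with_regex` (made-up names): zero out
# kernels, double biases, and leave everything else untouched.
#
#   tree = {"Dense_0": {"kernel": jnp.ones([2, 2]), "bias": jnp.ones([2])}}
#   rules = [(".*/kernel", 0.0), (".*/bias", 2.0)]
#   out = tree_map_with_regex(lambda v, arg: v * arg, tree, rules)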
def tree_get(tree, name):
"""Get an entry of pytree by flattened key name, eg a/b/c, with nice error.
Args:
tree: the pytree to be queried.
name: the path to extract from the tree, see below for examples.
Returns:
A few examples:
tree = {'a': 1, 'b': {'c': 2, 'd': 3}}
tree_get(tree, 'a') == 1
tree_get(tree, 'b/c') == 2
tree_get(tree, 'b') == {'c': 2, 'd': 3}
"""
flattened = dict(_traverse_with_names(tree, with_inner_nodes=True))
try:
return flattened[name]
except KeyError as e:
class Msg(str): # Reason: https://stackoverflow.com/a/70114007/2366315
def __repr__(self):
return str(self)
msg = "\n".join([name, "Available keys:", *flattened, ""])
# Turn into configdict to use its "did you mean?" error message!
msg = mlc.ConfigDict(flattened)._generate_did_you_mean_message(name, msg) # pylint: disable=protected-access
raise KeyError(Msg(msg)) from e
def tree_replace(tree, replacements):
"""Renames/removes (nested) keys.
Example usage:
tree = {'a': {'b': 2, 'c': 3}, 'c': 4}
replacements = {
'a/b': 'a/b/x', # replaces 'a/b' with 'a/b/x'
'.*c': 'C', # replaces 'c' with 'C' ('a/c' is removed)
'C': 'D', # replaces 'C' (which was 'c') with 'D'
'.*/c': None, # removes 'a/c'
}
    tree2 = tree_replace(tree, replacements)
assert tree2 == {'D': 4, 'a': {'b': {'x': 2}}}
Args:
tree: A nested dictionary.
replacements: Rules specifying `regex` as keys and `replacement` as values
to be used with `m = re.match(regex, key)` and `m.expand(replacement)`
for every `key` independently.
Note that:
1. If any rule matches with `replacement=None`, then the key is removed.
2. The rules are applied in order. It's possible to have multiple
transformations on a single key.
Returns:
Updated `tree` according to rules defined in `replacements`.
"""
replacements = {
re.compile(kk): vv for kk, vv in replacements.items()
}
def rename(k):
for kk, vv in replacements.items():
m = kk.match(k)
if m:
k = k[:m.start()] + m.expand(vv) + k[m.end():]
return k
def should_remove(k):
return any(vv is None and kk.match(k) for kk, vv in replacements.items())
names_and_vals, _ = tree_flatten_with_names(tree)
names_and_vals = [
(rename(k), v) for k, v in names_and_vals if not should_remove(k)
]
return tree_unflatten(names_and_vals)
def tree_compare(tree1, tree2):
"""Returns `(tree1_only, tree2_only, dtype_shape_mismatch)`."""
tree1 = flax.traverse_util.flatten_dict(tree1, sep="/")
tree2 = flax.traverse_util.flatten_dict(tree2, sep="/")
return set(tree1) - set(tree2), set(tree2) - set(tree1), {
k: [(v.dtype, v.shape), (tree2[k].dtype, tree2[k].shape)]
for k, v in tree1.items()
if k in tree2 and (v.dtype != tree2[k].dtype or v.shape != tree2[k].shape)
}
def recover_dtype(a):
"""Numpy's `save` stores bfloat16 type as "void" type, so we recover it."""
if hasattr(a, "dtype") and a.dtype.type is np.void:
assert a.itemsize == 2, "Unknown dtype!"
return a.view(jax.numpy.bfloat16)
else:
return a
# Checkpoint names encode tree structure, you can check out this colab for an
# example of how to recover tree structure from names:
# (internal link)
def save_checkpoint(checkpoint, path, step_copy=None, compressed=False):
"""Util for checkpointing: saves jax pytree objects to the disk.
Args:
checkpoint: arbitrary jax pytree to be saved.
path: a path to save the checkpoint.
step_copy: creates a copy of the checkpoint with `path-{step_copy}` name.
compressed: whether to use np.savez or np.savez_compressed, useful if saving
large buffers that are easily compressed (e.g. repeated or integers).
"""
names_and_vals, _ = tree_flatten_with_names(checkpoint)
io_buffer = io.BytesIO()
if compressed:
np.savez_compressed(io_buffer, **{k: v for k, v in names_and_vals})
else:
np.savez(io_buffer, **{k: v for k, v in names_and_vals})
  # In order to be robust to interruptions we first save the checkpoint to a
  # temporary file and then rename it to the actual path.
path_tmp = path + "-TEMPORARY"
with gfile.GFile(path_tmp, "wb") as f:
f.write(io_buffer.getvalue())
gfile.rename(path_tmp, path, overwrite=True)
if step_copy is not None:
gfile.copy(path, f"{path}-{step_copy:09d}", overwrite=True)
def recover_tree(keys, values):
"""Recovers a tree as a nested dict from flat names and values.
  This function is useful to analyze checkpoints that are saved by our programs
  without needing to access the exact source code of the experiment. In
  particular, it can be used to extract and reuse various subtrees of the
  checkpoint, e.g. the subtree of parameters.
Args:
keys: a list of keys, where '/' is used as separator between nodes.
values: a list of leaf values.
Returns:
A nested tree-like dict.
"""
tree = {}
sub_trees = collections.defaultdict(list)
for k, v in zip(keys, values):
if "/" not in k:
tree[k] = v
else:
k_left, k_right = k.split("/", 1)
sub_trees[k_left].append((k_right, v))
for k, kv_pairs in sub_trees.items():
k_subtree, v_subtree = zip(*kv_pairs)
tree[k] = recover_tree(k_subtree, v_subtree)
return tree
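# A hedged illustration of `recover_tree` (not part of the original file):
#
#   recover_tree(["a", "b/c", "b/d"], [1, 2, 3])
#   # -> {"a": 1, "b": {"c": 2, "d": 3}}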
def steps(prefix, config, data_size=None, batch_size=None, total_steps=None,
default=ValueError):
"""Gets duration named `prefix` out of `config` and converts it to steps.
Using this function to access a configuration value that denotes some kind
of duration (eg training time, warmup, checkpoint frequency, ...) allows the
duration to be specified in terms of steps, epochs, examples, or percent of
training time, and converts any of these into steps, such that the training
code only deals with steps.
If the result is not an integer step number, it is rounded to the nearest one.
Args:
    prefix: The name of the duration to query. The actual config fields can
      then be one of `prefix_steps`, `prefix_examples`, `prefix_epochs`, or
      `prefix_percent`.
config: The dictionary (config) from which to read the duration.
data_size: The total number of training examples in one epoch.
batch_size: The number of examples processed per step.
total_steps: The total number of training steps to run.
default: The default value to return when no duration of the name `prefix`
is found in the `config`. Set to `ValueError` (the default) to raise an
error instead of returning a default value.
Returns:
The number of steps from the config, or the default value.
Raises:
ValueError if there is no such duration in the config and no default is set.
"""
  # Be helpful and make sure only one of the following suffixes matches.
suffixes = {"steps", "examples", "epochs", "percent"}
matches = {f"{prefix}_{s}" for s in suffixes if f"{prefix}_{s}" in config}
# Note that steps=0 is also a valid value (e.g. to only run evaluators).
assert len(matches) <= 1, f"Only one of '{matches}' should be defined."
if f"{prefix}_steps" in config:
return config[f"{prefix}_steps"]
if batch_size and f"{prefix}_examples" in config:
return max(round(config[f"{prefix}_examples"] / batch_size), 1)
if batch_size and data_size and f"{prefix}_epochs" in config:
steps_per_epoch = data_size / batch_size
return max(round(config[f"{prefix}_epochs"] * steps_per_epoch), 1)
if total_steps and f"{prefix}_percent" in config:
pct = config[f"{prefix}_percent"]
assert 0.0 <= pct <= 1.0, ( # Be helpful, since it's not obvious.
f"Percents should lie in [0.0, 1.0], but {prefix}_percent is {pct}")
return max(round(pct * total_steps), 1)
if default is ValueError:
raise ValueError(
f"Cannot convert {prefix} to steps, due to missing batch_size "
f"({batch_size}), data_size ({data_size}), total_steps ({total_steps})"
", or corresponding entry in config:\n" + "\n".join(config.keys()))
return default
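# A hedged illustration of `steps` (made-up numbers): with 1000 examples per
# epoch and batch size 10, each of these configs resolves to 500 steps:
#
#   steps("total", {"total_steps": 500}, data_size=1000, batch_size=10)
#   steps("total", {"total_epochs": 5}, data_size=1000, batch_size=10)
#   steps("total", {"total_examples": 5000}, data_size=1000, batch_size=10)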
def create_learning_rate_schedule(
total_steps, batch_size=None, data_size=None,
base=1.0, decay_type="stair",
scale_with_batchsize=False, **kw):
"""Creates learning rate schedule, see (internal link).
Args:
total_steps: The total number of steps to run.
batch_size: The global batch-size optionally used for scaling.
data_size: Number of examples in the training data (for epoch conversion).
base: The starting learning-rate (without warmup).
decay_type: 'linear' or 'cosine', 'rsqrt', 'stair'.
scale_with_batchsize: Whether or not to scale lr automatically.
**kw: extra arguments specific to individual decay_types. Also contains
declaration of `{warmup,cooldown}_{steps,epochs,examples}` that applies
on top of any/all decay_type.
  Returns:
    A function learning_rate(step) mapping a step to a float32 scalar lr.
"""
warmup_steps = steps(
"warmup", kw, data_size, batch_size, total_steps, default=0)
cooldown_steps = steps(
"cooldown", kw, data_size, batch_size, total_steps, default=0)
  # Catch hard-to-backtrack errors due to warmup_steps >= total_steps early,
  # but still allow 0 and 1 total steps, which are used for eval/debug runs.
assert (total_steps <= 1) or (warmup_steps < total_steps), (
"warmup_steps is >= total_steps")
def step_fn(step):
"""Step to learning rate function."""
lr = base
# This implements the linear scaling rule following
# Goyal et al. at arxiv.org/abs/1706.02677.
# The reference batch size in literature is 256, so we scale the lr to
    # adjust to the literature lr when batch_size changes.
if scale_with_batchsize:
lr = lr * batch_size / 256.0
progress = (step - warmup_steps) / float(total_steps - warmup_steps)
progress = jnp.clip(progress, 0.0, 1.0)
if decay_type in ("linear", "polynomial"):
power = kw.get("power", 1)
zero = kw.get("end", kw.get("linear_end", 0))
lr = zero + (lr - zero) * (1.0 - progress) ** power
elif decay_type == "cosine":
lr = lr * 0.5 * (1. + jnp.cos(jnp.pi * progress))
elif decay_type == "rsqrt":
timescale = kw.get("timescale", 10_000)
shift = timescale - warmup_steps
lr = jnp.where(
warmup_steps < step, lr / jnp.sqrt((step + shift) / timescale), lr)
elif decay_type == "stair":
i = jnp.searchsorted(jnp.array(kw.get("steps", [])), step + 1)
lr = lr * jnp.take(jnp.array([1.0] + list(kw.get("mults", []))), i)
else:
raise ValueError(f"Unknown lr type {decay_type}")
if warmup_steps:
lr = lr * jnp.minimum(1., step / warmup_steps)
if cooldown_steps:
lr = lr * jnp.minimum(1., (total_steps - step) / cooldown_steps)
return jnp.asarray(lr, dtype=jnp.float32)
return step_fn
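# A hedged illustration (made-up numbers): a linear decay with 10 warmup steps.
#
#   sched = create_learning_rate_schedule(
#       total_steps=100, base=0.1, decay_type="linear", warmup_steps=10)
#   sched(0)   # == 0.0  (warmup ramps up from zero)
#   sched(10)  # == 0.1  (warmup done, decay begins)
#   sched(55)  # == 0.05 (halfway through the remaining 90 steps)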
def mixup(rng, *things, p=0.1, fold_in=None, n=2, **more_things):
"""Perform mixup https://arxiv.org/abs/1710.09412.
Args:
rng: The random key to use.
*things: further arguments are the arrays to be mixed.
p: the beta/dirichlet concentration parameter, typically 0.1 or 0.2.
fold_in: One of None, "host", "device", or "sample". Whether to sample a
global mixing coefficient, one per host, one per device, or one per
example, respectively. The latter is usually a bad idea.
n: with how many other images an image is mixed. Default mixup is n=2.
**more_things: further kwargs are arrays to be mixed. See also (internal link)
for further experiments and investigations.
Returns:
A new rng key. A list of mixed *things. A dict of mixed **more_things.
"""
rng, rng_m = jax.random.split(rng, 2)
if fold_in == "host":
rng_m = jax.random.fold_in(rng_m, jax.process_index())
elif fold_in in ("device", "sample"):
rng_m = jax.random.fold_in(rng_m, jax.lax.axis_index("batch"))
ashape = (len(things[0]),) if fold_in == "sample" else (1,)
alpha = jax.random.dirichlet(rng_m, jnp.array([p]*n), ashape)
# Sort alpha values in decreasing order. This avoids destroying examples when
# the concentration parameter p is very small, due to Dirichlet's symmetry.
alpha = -jnp.sort(-alpha, axis=-1)
def mix(batch):
if batch is None: return None # For call-side convenience!
def mul(a, b): # B * BHWC -> B111 * BHWC
return b * jnp.expand_dims(a, tuple(range(1, b.ndim)))
return sum(mul(alpha[:, i], jnp.roll(batch, i, axis=0)) for i in range(n))
return rng, map(mix, things), {k: mix(v) for k, v in more_things.items()}
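# A hedged usage sketch for `mixup` (made-up array names):
#
#   rng = jax.random.PRNGKey(0)
#   rng, (images,), more = mixup(rng, images, p=0.2, labels=labels)
#   labels = more["labels"]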
def _sync(x):
return jax.lax.psum(x, "i")
def sync():
"""Syncs hosts and empties async computation queue."""
x = jnp.ones([jax.local_device_count()])
x = jax.device_get(jax.pmap(_sync, "i")(x))
assert x[0] == jax.device_count()
def check_and_compile_patterns(patterns):
"""Validates and compiles a list of param-patterns.
The validation consists of checking for common mistakes, currently only that
the pattern does not start with a slash, because unlike FLAX, our parameter
names don't start with a slash.
Args:
patterns: a single (string) pattern (regex), or a list of patterns.
Returns:
A list of compiled and verified regexes.
"""
if isinstance(patterns, str):
patterns = [patterns]
assert isinstance(patterns, (list, tuple)), patterns
def check_and_compile(pattern):
assert not pattern.startswith("/"), (
f"Big vision parameter names never start with '/': '{pattern}")
return re.compile(pattern)
return list(map(check_and_compile, patterns))
def make_mask_trees(tree, patterns, *, log=None):
"""Returns a boolean mask tree for every pattern (only first match)."""
compiled_patterns = check_and_compile_patterns(patterns)
def matchfirst(name, _):
matches = []
for pattern in compiled_patterns:
matches.append(not any(matches) and bool(pattern.fullmatch(name)))
if log is not None and True in matches and jax.process_index() == 0:
logging.info("%s: %s - matched by %s", log, name,
patterns[matches.index(True)])
return np.array(matches)
multimask = tree_map_with_names(matchfirst, tree)
return [
jax.tree_util.tree_map(lambda matches, i=idx: matches[i], multimask)
for idx in range(len(patterns))
]
@contextlib.contextmanager
def profile(name, ttl=3 * 365 * 24 * 3600, noop=False):
if not noop:
sess = startstop_prof_at_steps(None, name=name, ttl=ttl)
yield
if not noop:
startstop_prof_at_steps(sess, name=name, ttl=ttl)
def startstop_prof(sess, step=None, first_step=0,
log_steps=1, surround=20, **kw):
"""Runs the profiler for `surround` steps around the next `log_steps`."""
first_log = first_step + log_steps - (first_step % log_steps)
# don't start before first!
start = max(first_log - surround//2, first_step + 1)
return startstop_prof_at_steps(sess, step, start, start + surround, **kw)
def startstop_prof_at_steps(
sess, step=None, first_step=None, last_step=None,
name="steps", ttl=3 * 365 * 24 * 3600):
del sess, step, first_step, last_step, name, ttl
pass # TODO: implement using `jax.profiler` API. Needs workdir.
# This is a very minimal variant for open-sourcing. Our internal code makes use
# of multiple internal logging tools instead.
class BigVisionMetricWriter:
"""A class for logging metrics."""
def __init__(self, xid=-1, wid=-1, workdir=None, config=None):
self.step_start(0)
if jax.process_index() != 0: return # Only one host shall write stuff.
self.pool = multiprocessing.pool.ThreadPool(1) # 1 is important here.
self.fname = None
if workdir:
if xid != -1 and wid != -1:
self.fname = os.path.join(workdir,
f"big_vision_{xid}_{wid}_metrics.txt")
else:
self.fname = os.path.join(workdir, "big_vision_metrics.txt")
if config:
with gfile.GFile(os.path.join(workdir, "config.json"), "w") as f:
f.write(config.to_json())
def step_start(self, step):
self.step = step
self.step_metrics = {}
def measure(self, name, value):
"""Logs the metric value."""
if jax.process_index() != 0: return # Only one host shall write stuff.
# Convenience for accepting scalar np/DeviceArrays, as well as N-d single
# scalars, like [[[123]]] or similar, avoiding silly mistakes.
value = np.array(value).squeeze()
# If the value is a scalar, we keep it in mind to append a line to the logs.
# If it has any structure, we instead just log its shape.
value = float(value) if value.ndim == 0 else value.shape
logging.info(f"\u001b[35m[{self.step}]\u001b[0m {name} = {value}")
logging.flush()
self.step_metrics[name] = value
return value # Just for convenience
def step_end(self):
"""Ends a training step, write its full row."""
if not self.step_metrics: return
def write(metrics):
with gfile.GFile(self.fname, "a") as f:
f.write(json.dumps({"step": self.step, **metrics}) + "\n")
if self.fname:
self.pool.apply(lambda: None) # Potentially wait for past writes.
self.pool.apply_async(write, (self.step_metrics,))
def close(self):
self.step_end()
if jax.process_index() == 0:
self.pool.close()
self.pool.join()
def maybe_cleanup_workdir(workdir, cleanup, info):
"""Potentially removes workdirs at end of run for cleanup."""
if not workdir:
return
if not cleanup:
info("Logs/checkpoints are in %s", workdir)
elif jax.process_index() == 0:
gfile.rmtree(workdir)
try: # Only need this on the last work-unit, if already empty.
gfile.remove(os.path.join(workdir, ".."))
except tf.errors.OpError:
pass
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop example.
This is a basic variant of a training loop, a good starting point for fancier ones.
"""
# pylint: disable=consider-using-from-import
import functools
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
from tensorflow.io import gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info(
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
  # The pool is used to perform misc operations such as logging in an async way.
pool = multiprocessing.pool.ThreadPool()
  # Here we register preprocessing ops from modules listed in `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image", "ops_text"]):
importlib.import_module(f"big_vision.pp.{m}")
# This seed makes the Jax part of things (like model init) deterministic.
  # However, full training still won't be deterministic, for example because
  # the tf.data pipeline is not deterministic even if we set the TF seed.
# See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
  # These functions do more stuff internally; for the OSS release we mock them
  # with trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
write_note("Initializing train dataset...")
train_ds, ntrain_img = input_pipeline.training(config.input)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
total_steps = u.steps("total", config, ntrain_img, batch_size)
def get_steps(name, default=ValueError, cfg=config):
return u.steps(name, cfg, ntrain_img, batch_size, total_steps, default)
u.chrono.inform(total_steps=total_steps, global_bs=batch_size,
steps_per_epoch=ntrain_img / batch_size,
measure=mw.measure, write_note=write_note)
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
write_note(f"Initializing {config.model_name} model...")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = model_mod.Model(
num_classes=config.num_classes, **config.get("model", {}))
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent there later as needed. Otherwise we have already encountered
  # two situations where we allocated them twice.
@functools.partial(jax.jit, backend="cpu")
def init(rng):
bs = batch_size // jax.device_count()
image_size = tuple(train_ds.element_spec["image"].shape[1:])
no_image = jnp.zeros((bs,) + image_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, no_image))["params"]
# Set bias in the head to a low value, such that loss is small initially.
if "init_head_bias" in config:
params["head"]["bias"] = jnp.full_like(params["head"]["bias"],
config["init_head_bias"])
return params
rng, rng_init = jax.random.split(rng)
with u.chrono.log_timing("z/secs/init"):
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@functools.partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1))
def update_fn(params, opt, rng, images, labels):
"""Update step."""
measurements = {}
if config.get("mixup") and config.mixup.p:
rng, (images, labels), _ = u.mixup(rng, images, labels, **config.mixup)
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index("batch"))
def loss_fn(params, images, labels):
logits, _ = model.apply(
{"params": params}, images,
train=True, rngs={"dropout": rng_model_local})
return getattr(u, config.get("loss", "sigmoid_xent"))(
logits=logits, labels=labels)
l, grads = jax.value_and_grad(loss_fn)(params, images, labels)
l, grads = jax.lax.pmean((l, grads), axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, rng, l, measurements
# We do not jit/pmap this function, because it is passed to evaluator that
# does it later. We output as many intermediate tensors as possible for
# maximal flexibility. Later `jit` will prune out things that are not needed.
def predict_fn(params, image):
logits, out = model.apply({"params": params}, image)
return logits, out
# Only initialize evaluators when they are first needed.
@functools.lru_cache(maxsize=None)
def evaluators():
return eval_common.from_config(
config, {"predict": predict_fn},
lambda s: write_note(f"Init evaluator: {s}…\n{u.chrono.note}"),
lambda key, cfg: get_steps(key, default=None, cfg=cfg),
)
# Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. resume a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
# 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"opt": opt_cpu,
"chrono": u.chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu, opt_cpu = checkpoint["params"], checkpoint["opt"]
u.chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.get("model"),
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
u.chrono.inform(first_step=first_step)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{u.chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{u.chrono.note}")
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
with u.chrono.log_timing("z/secs/update0", noop=step > first_step + 1):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop, batch["image"], batch["labels"])
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or u.chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
u.chrono.tick(step)
if not np.isfinite(l):
raise RuntimeError(f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
u.chrono.pause(wait_for=(params_repl, opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
      # alive while they'll be updated in a future step, creating hard-to-debug
      # memory errors (see (internal link)). Also, this takes device 0's params
      # only.
params_cpu = jax.tree_map(lambda x: np.array(x[0]), params_repl)
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu, "opt": opt_cpu, "chrono": u.chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
u.chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators():
if u.itstime(step, log_steps, total_steps, first=log_steps < total_steps,
last=False):
u.chrono.pause(wait_for=params_repl)
u.chrono.tick(step) # Record things like epoch number, core hours etc.
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.chrono.resume()
mw.step_end()
  # Run evals after training is done. Running them here guarantees that evals
  # will run if the job is restarted after writing the last checkpoint, and
  # also supports eval-only runs (when total_steps or num_epochs is 0).
mw.step_start(total_steps)
for (name, evaluator, _, prefix) in evaluators():
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
write_note(f"Done!\n{u.chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from big_vision.pp import utils
import tensorflow.compat.v1 as tf
class UtilsTest(tf.test.TestCase):
def test_maybe_repeat(self):
self.assertEqual((1, 1, 1), utils.maybe_repeat(1, 3))
self.assertEqual((1, 2), utils.maybe_repeat((1, 2), 2))
self.assertEqual([1, 2], utils.maybe_repeat([1, 2], 2))
def test_inkeyoutkey(self):
@utils.InKeyOutKey()
def get_pp_fn(shift, scale=0):
def _pp_fn(x):
return scale * x + shift
return _pp_fn
data = {"k_in": 2, "other": 3}
ppfn = get_pp_fn(1, 2, inkey="k_in", outkey="k_out") # pylint: disable=unexpected-keyword-arg
self.assertEqual({"k_in": 2, "k_out": 5, "other": 3}, ppfn(data))
data = {"k": 6, "other": 3}
ppfn = get_pp_fn(1, inkey="k", outkey="k") # pylint: disable=unexpected-keyword-arg
self.assertEqual({"k": 1, "other": 3}, ppfn(data))
data = {"other": 6, "image": 3}
ppfn = get_pp_fn(5, 2)
self.assertEqual({"other": 6, "image": 11}, ppfn(data))
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ops_image."""
import copy
import io
import big_vision.pp.ops_image as pp
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
def get_image_data():
img = tf.random.uniform((640, 480, 3), 0, 255, tf.int32) # Can't ask uint8!?
return {"image": tf.cast(img, tf.uint8)}
class PreprocessOpsTest(tf.test.TestCase):
def tfrun(self, ppfn, data):
    # Run once as standalone, as could happen e.g. in colab.
yield {k: np.array(v) for k, v in ppfn(copy.deepcopy(data)).items()}
# And then once again as part of tfdata pipeline.
# You'd be surprised how much these two differ!
tfdata = tf.data.Dataset.from_tensors(copy.deepcopy(data))
for npdata in tfdata.map(ppfn).as_numpy_iterator():
yield npdata
def test_resize(self):
for data in self.tfrun(pp.get_resize([120, 80]), get_image_data()):
self.assertEqual(data["image"].shape, (120, 80, 3))
def test_resize_small(self):
for data in self.tfrun(pp.get_resize_small(240), get_image_data()):
self.assertEqual(data["image"].shape, (320, 240, 3))
def test_inception_crop(self):
for data in self.tfrun(pp.get_inception_crop(), get_image_data()):
self.assertEqual(data["image"].shape[-1], 3)
def test_decode_jpeg_and_inception_crop(self):
f = io.BytesIO()
plt.imsave(f, get_image_data()["image"].numpy(), format="jpg")
data = {"image": tf.cast(f.getvalue(), tf.string)}
for data in self.tfrun(pp.get_decode_jpeg_and_inception_crop(), data):
self.assertEqual(data["image"].shape[-1], 3)
def test_random_crop(self):
for data in self.tfrun(pp.get_random_crop([120, 80]), get_image_data()):
self.assertEqual(data["image"].shape, (120, 80, 3))
def test_central_crop(self):
for data in self.tfrun(pp.get_central_crop([20, 80]), get_image_data()):
self.assertEqual(data["image"].shape, (20, 80, 3))
def test_random_flip_lr(self):
data_orig = get_image_data()
for data in self.tfrun(pp.get_random_flip_lr(), data_orig):
self.assertTrue(
np.all(data_orig["image"].numpy() == data["image"]) or
np.all(data_orig["image"].numpy() == data["image"][:, ::-1]))
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global Registry for big_vision pp ops.
Author: Joan Puigcerver (jpuigcerver@)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import contextlib
import functools
def parse_name(string_to_parse):
"""Parses input to the registry's lookup function.
Args:
string_to_parse: can be either an arbitrary name or function call
(optionally with positional and keyword arguments).
e.g. "multiclass", "resnet50_v2(filters_factor=8)".
Returns:
A tuple of input name, argument tuple and a keyword argument dictionary.
Examples:
"multiclass" -> ("multiclass", (), {})
"resnet50_v2(9, filters_factor=4)" ->
("resnet50_v2", (9,), {"filters_factor": 4})
Author: Joan Puigcerver (jpuigcerver@)
"""
expr = ast.parse(string_to_parse, mode="eval").body # pytype: disable=attribute-error
if not isinstance(expr, (ast.Attribute, ast.Call, ast.Name)):
raise ValueError(
"The given string should be a name or a call, but a {} was parsed from "
"the string {!r}".format(type(expr), string_to_parse))
# Notes:
# name="some_name" -> type(expr) = ast.Name
# name="module.some_name" -> type(expr) = ast.Attribute
# name="some_name()" -> type(expr) = ast.Call
# name="module.some_name()" -> type(expr) = ast.Call
if isinstance(expr, ast.Name):
return string_to_parse, (), {}
elif isinstance(expr, ast.Attribute):
return string_to_parse, (), {}
def _get_func_name(expr):
if isinstance(expr, ast.Attribute):
return _get_func_name(expr.value) + "." + expr.attr
elif isinstance(expr, ast.Name):
return expr.id
else:
raise ValueError(
"Type {!r} is not supported in a function name, the string to parse "
"was {!r}".format(type(expr), string_to_parse))
def _get_func_args_and_kwargs(call):
args = tuple([ast.literal_eval(arg) for arg in call.args])
kwargs = {
kwarg.arg: ast.literal_eval(kwarg.value) for kwarg in call.keywords
}
return args, kwargs
func_name = _get_func_name(expr.func)
func_args, func_kwargs = _get_func_args_and_kwargs(expr)
return func_name, func_args, func_kwargs
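# Illustrative sketch (not part of the original file): a few inputs and the
# tuples parse_name returns for them. Wrapped in a function so it does not
# execute on import.
def _example_parse_name():
  assert parse_name("flip_lr") == ("flip_lr", (), {})
  assert parse_name("resize(256)") == ("resize", (256,), {})
  assert parse_name("resize_small(240, method='area')") == (
      "resize_small", (240,), {"method": "area"})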
class Registry(object):
"""Implements global Registry.
Authors: Joan Puigcerver (jpuigcerver@), Alexander Kolesnikov (akolesnikov@)
"""
_GLOBAL_REGISTRY = {}
@staticmethod
def global_registry():
return Registry._GLOBAL_REGISTRY
@staticmethod
def register(name, replace=False):
"""Creates a function that registers its input."""
def _register(item):
if name in Registry.global_registry() and not replace:
raise KeyError("The name {!r} was already registered.".format(name))
Registry.global_registry()[name] = item
return item
return _register
@staticmethod
def lookup(lookup_string, kwargs_extra=None):
"""Lookup a name in the registry."""
try:
name, args, kwargs = parse_name(lookup_string)
except ValueError as e:
raise ValueError(f"Error parsing pp:\n{lookup_string}") from e
if kwargs_extra:
kwargs.update(kwargs_extra)
item = Registry.global_registry()[name]
return functools.partial(item, *args, **kwargs)
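# A minimal register/lookup round-trip (sketch, not part of the original file;
# the op name "add_offset" is made up for illustration).
def _example_registry_roundtrip():
  @Registry.register("add_offset", replace=True)
  def add_offset(offset):
    return lambda x: x + offset
  op_factory = Registry.lookup("add_offset(offset=10)")
  assert op_factory()(5) == 15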
@contextlib.contextmanager
def temporary_ops(**kw):
"""Registers specified pp ops for use in a `with` block.
Example use:
    with pp_registry.temporary_ops(
pow=lambda alpha: lambda d: {k: v**alpha for k, v in d.items()}):
pp = pp_builder.get_preprocess_fn("pow(alpha=2.0)|pow(alpha=0.5)")
features = pp(features)
Args:
**kw: Names are preprocess string function names to be used to specify the
preprocess function. Values are functions that can be called with params
(e.g. the `alpha` param in above example) and return functions to be used
to transform features.
Yields:
A context manager to be used in a `with` statement.
"""
reg = Registry.global_registry()
kw = {f"preprocess_ops.{k}": v for k, v in kw.items()}
for k in kw:
assert k not in reg
for k, v in kw.items():
reg[k] = v
try:
yield
finally:
for k in kw:
del reg[k]
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text-centric preprocessing ops.
All preprocessing ops should return a data-processing functor. Data is
represented as a dictionary of (TF) tensors. The functors output a modified
dictionary.
A commonly used key for the tokenized output is "labels".
"""
from big_vision.datasets.imagenet import class_names as imagenet_class_names
from big_vision.pp.registry import Registry
import big_vision.pp.utils as utils
import tensorflow as tf
@Registry.register("preprocess_ops.clip_i1k_label_names")
@utils.InKeyOutKey(indefault="label", outdefault="labels")
def get_pp_clip_i1k_label_names():
"""Convert i1k label numbers to strings, using CLIP's class names."""
def _pp_imagenet_labels(label):
return tf.gather(imagenet_class_names.CLIP_IMAGENET_CLASS_NAMES, label)
return _pp_imagenet_labels
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ops_general."""
import copy
import big_vision.pp.ops_general as pp
import numpy as np
import tensorflow as tf
class PreprocessOpsTest(tf.test.TestCase):
def tfrun(self, ppfn, data):
    # Run once as standalone, as could happen e.g. in colab.
yield {k: np.array(v) for k, v in ppfn(copy.deepcopy(data)).items()}
# And then once again as part of tfdata pipeline.
# You'd be surprised how much these two differ!
tfdata = tf.data.Dataset.from_tensors(copy.deepcopy(data))
for npdata in tfdata.map(ppfn).as_numpy_iterator():
yield npdata
def test_value_range(self):
img = tf.random.uniform((640, 480, 3), 0, 255, tf.int32)
data = {"image": tf.cast(img, tf.uint8)}
for out in self.tfrun(pp.get_value_range(-0.5, 0.5), data):
self.assertLessEqual(np.max(out["image"]), 0.5)
self.assertGreaterEqual(np.min(out["image"]), -0.5)
def test_value_range_custom_input_range(self):
img = tf.random.uniform((640, 480, 3), 0, 255, tf.int32)
data = {"image": tf.cast(img, tf.uint8)}
for out in self.tfrun(pp.get_value_range(-0.5, 0.5, -256, 255, True), data):
self.assertLessEqual(np.max(out["image"]), 0.5)
self.assertGreaterEqual(np.min(out["image"]), 0.0)
def test_get_keep_drop(self):
data = {"image": 1, "labels": 2, "something": 3}
for data_keep in self.tfrun(pp.get_keep("image", "labels"), data):
self.assertAllEqual(set(data_keep.keys()), {"image", "labels"})
for data_drop in self.tfrun(pp.get_drop("image", "labels"), data):
self.assertAllEqual(set(data_drop.keys()), {"something"})
def test_onehot(self):
data = {"labels": tf.constant(2, dtype=tf.int64)}
for out in self.tfrun(pp.get_onehot(4, "labels", multi=True), data):
self.assertAllClose(out["labels"], [0., 0., 1., 0.])
def test_onehot_multi(self):
data = {"labels": tf.constant([2, 3, 0], dtype=tf.int64)}
for out in self.tfrun(pp.get_onehot(4, "labels", multi=False), data):
self.assertAllClose(out["labels"], [
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]])
for out in self.tfrun(pp.get_onehot(4, "labels", multi=True), data):
self.assertAllClose(out["labels"], [1., 0., 1., 1.])
def test_onehot_2d(self):
data = {"labels": tf.constant([[2, 3], [0, 1]], dtype=tf.int64)}
for out in self.tfrun(pp.get_onehot(4, "labels", multi=False), data):
self.assertAllClose(out["labels"], [
[[0., 0., 1., 0.], [0., 0., 0., 1.]],
[[1., 0., 0., 0.], [0., 1., 0., 0.]]])
def test_onehot_smoothing(self):
data = {"labels": tf.constant([2, 3, 0], dtype=tf.int64)}
for out in self.tfrun(
pp.get_onehot(4, "labels", multi=False, on=0.8, off=0.1), data):
self.assertAllClose(out["labels"], [
[0.1, 0.1, 0.8, 0.1],
[0.1, 0.1, 0.1, 0.8],
[0.8, 0.1, 0.1, 0.1]])
for out in self.tfrun(
pp.get_onehot(4, "labels", multi=True, on=0.8, off=0.1), data):
self.assertAllClose(out["labels"], [0.8, 0.1, 0.8, 0.8])
def test_squeeze_last_dim(self):
data = {"image": tf.constant(np.zeros((32, 32, 3, 1)))}
for out in self.tfrun(pp.get_squeeze_last_dim(), data):
self.assertAllEqual(out["image"].shape, [32, 32, 3])
def test_pad_to_shape(self):
desired_shape = (8, 10)
for input_shape in [(8, 4), (8, 3), (8, 10), (8, 1)]:
data = {"x": tf.ones(input_shape, dtype=tf.float32)}
for out in self.tfrun(
pp.get_pad_to_shape(desired_shape, pad_value=-1, key="x"), data):
self.assertEqual(
tf.reduce_sum(out["x"]),
2 * np.product(input_shape) - np.product(desired_shape))
def test_flatten(self):
d = {"a": {"b": tf.constant([1, 2, 3])}, "c": "str"}
self.assertEqual(pp.get_flatten()(d), {
"a/b": tf.constant([1, 2, 3]),
"c": "str"
})
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing builder."""
from absl import logging
from big_vision.pp.registry import Registry
def get_preprocess_fn(pp_pipeline, log_data=True):
"""Transform an input string into the preprocessing function.
The minilanguage is as follows:
fn1|fn2(arg, arg2,...)|...
And describes the successive application of the various `fn`s to the input,
where each function can optionally have one or more arguments, which are
either positional or key/value, as dictated by the `fn`.
The output preprocessing function expects a dictionary as input. This
dictionary should have a key "image" that corresponds to a 3D tensor
(height x width x channel).
Args:
pp_pipeline: A string describing the pre-processing pipeline. If empty or
None, no preprocessing will be executed.
log_data: Whether to log the data before and after preprocessing. Note:
      remember to set this to `False` in eager mode to avoid too many log
      messages.
Returns:
preprocessing function.
Raises:
ValueError: if preprocessing function name is unknown
"""
ops = []
if pp_pipeline:
for fn_name in pp_pipeline.split("|"):
if not fn_name: continue # Skip empty section instead of error.
try:
ops.append(Registry.lookup(f"preprocess_ops.{fn_name}")())
except SyntaxError as err:
raise ValueError(f"Syntax error on: {fn_name}") from err
def _preprocess_fn(data):
"""The preprocessing function that is returned."""
# Apply all the individual steps in sequence.
if log_data:
logging.info("Data before pre-processing:\n%s", data)
for op in ops:
data = op(data)
# Validate input
if not isinstance(data, dict):
raise ValueError("Argument `data` must be a dictionary, "
"not %s" % str(type(data)))
if log_data:
logging.info("Data after pre-processing:\n%s", data)
return data
return _preprocess_fn
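# Usage sketch (not part of the original file): composing a pipeline with the
# "|" minilanguage above. It assumes big_vision.pp.ops_image has been imported
# so that the named ops are registered.
def _example_get_preprocess_fn():
  pp_fn = get_preprocess_fn("decode|resize(256)|central_crop(224)|flip_lr",
                            log_data=False)
  # Apply as: processed = pp_fn({"image": encoded_image_bytes}).
  return pp_fn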
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image-centric preprocessing ops.
All preprocessing ops should return a data-processing functor. Data is
represented as a dictionary of (TF) tensors. The functors output a modified
dictionary.
The key named "image" is commonly used for the image, and is a 3D tensor of
shape (height x width x channels).
"""
from big_vision.pp import autoaugment
from big_vision.pp import utils
from big_vision.pp.registry import Registry
import tensorflow as tf
@Registry.register("preprocess_ops.decode")
@utils.InKeyOutKey()
def get_decode(channels=3):
"""Decode an encoded image string, see tf.io.decode_image."""
def _decode(image):
return tf.io.decode_image(image, channels=channels, expand_animations=False)
return _decode
@Registry.register("preprocess_ops.resize")
@utils.InKeyOutKey()
def get_resize(size, method="bilinear", antialias=False):
"""Resizes image to a given size.
Args:
size: either an integer H, where H is both the new height and width
of the resized image, or a list or tuple [H, W] of integers, where H and W
are new image"s height and width respectively.
method: resize method, see tf.image.resize docs for options.
antialias: see tf.image.resize. Ideally set to True for all new configs.
Returns:
A function for resizing an image.
"""
size = utils.maybe_repeat(size, 2)
def _resize(image):
"""Resizes image to a given size."""
# Note: use TF-2 version of tf.image.resize as the version in TF-1 is
# buggy: https://github.com/tensorflow/tensorflow/issues/6720.
    # In particular, it was not equivariant with rotation and led the network
    # to learn a shortcut in the self-supervised rotation task if rotation was
    # applied after resize.
dtype = image.dtype
image = tf.image.resize(image, size, method=method, antialias=antialias)
return tf.cast(image, dtype)
return _resize
@Registry.register("preprocess_ops.resize_small")
@utils.InKeyOutKey()
def get_resize_small(smaller_size, method="area", antialias=False):
"""Resizes the smaller side to `smaller_size` keeping aspect ratio.
Args:
smaller_size: an integer, that represents a new size of the smaller side of
an input image.
method: the resize method. `area` is a meaningful, bwd-compat default.
antialias: see tf.image.resize. Ideally set to True for all new configs.
Returns:
A function, that resizes an image and preserves its aspect ratio.
Note:
backwards-compat for "area"+antialias tested here:
(internal link)
"""
def _resize_small(image): # pylint: disable=missing-docstring
h, w = tf.shape(image)[0], tf.shape(image)[1]
# Figure out the necessary h/w.
ratio = (
tf.cast(smaller_size, tf.float32) /
tf.cast(tf.minimum(h, w), tf.float32))
h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)
dtype = image.dtype
image = tf.image.resize(image, (h, w), method=method, antialias=antialias)
return tf.cast(image, dtype)
return _resize_small
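# Worked example (sketch, not part of the original file), matching the ratio
# computation above: for a 640x480 input, smaller_size=240 gives 480 -> 240
# and 640 -> 320.
def _example_resize_small():
  op = get_resize_small(240)
  out = op({"image": tf.zeros([640, 480, 3], tf.uint8)})
  assert tuple(out["image"].shape) == (320, 240, 3)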
@Registry.register("preprocess_ops.inception_crop")
@utils.InKeyOutKey()
def get_inception_crop(size=None, area_min=5, area_max=100,
method="bilinear", antialias=False):
"""Makes inception-style image crop.
Inception-style crop is a random image crop (its size and aspect ratio are
random) that was used for training Inception models, see
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.
Args:
size: Resize image to [size, size] after crop.
area_min: minimal crop area.
area_max: maximal crop area.
    method: resize method, see tf.image.resize docs for options.
antialias: see tf.image.resize. Ideally set to True for all new configs.
Returns:
A function, that applies inception crop.
"""
def _inception_crop(image): # pylint: disable=missing-docstring
begin, crop_size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.zeros([0, 0, 4], tf.float32),
area_range=(area_min / 100, area_max / 100),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
crop = tf.slice(image, begin, crop_size)
    # Unfortunately, the above operation loses the depth dimension, so we need
    # to restore it manually.
crop.set_shape([None, None, image.shape[-1]])
if size:
crop = get_resize(size, method, antialias)({"image": crop})["image"]
return crop
return _inception_crop
@Registry.register("preprocess_ops.decode_jpeg_and_inception_crop")
@utils.InKeyOutKey()
def get_decode_jpeg_and_inception_crop(size=None, area_min=5, area_max=100,
method="bilinear", antialias=False):
"""Decode jpeg string and make inception-style image crop.
Inception-style crop is a random image crop (its size and aspect ratio are
random) that was used for training Inception models, see
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.
Args:
size: Resize image to [size, size] after crop.
area_min: minimal crop area.
area_max: maximal crop area.
    method: resize method, see tf.image.resize docs for options.
antialias: see tf.image.resize. Ideally set to True for all new configs.
Returns:
A function, that applies inception crop.
"""
def _inception_crop(image_data): # pylint: disable=missing-docstring
shape = tf.image.extract_jpeg_shape(image_data)
begin, crop_size, _ = tf.image.sample_distorted_bounding_box(
shape,
tf.zeros([0, 0, 4], tf.float32),
area_range=(area_min / 100, area_max / 100),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(begin)
target_height, target_width, _ = tf.unstack(crop_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_data, crop_window, channels=3)
if size:
image = get_resize(size, method, antialias)({"image": image})["image"]
return image
return _inception_crop
@Registry.register("preprocess_ops.random_crop")
@utils.InKeyOutKey()
def get_random_crop(crop_size):
"""Makes a random crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of the
random crop, or a list or tuple [H, W] of integers, where H and W are
height and width of the random crop respectively.
Returns:
A function, that applies random crop.
"""
crop_size = utils.maybe_repeat(crop_size, 2)
def _crop(image):
return tf.image.random_crop(image, (*crop_size, image.shape[-1]))
return _crop
@Registry.register("preprocess_ops.central_crop")
@utils.InKeyOutKey()
def get_central_crop(crop_size):
"""Makes central crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of the
central crop, or a list or tuple [H, W] of integers, where H and W are
height and width of the central crop respectively.
Returns:
A function, that applies central crop.
"""
crop_size = utils.maybe_repeat(crop_size, 2)
def _crop(image):
h, w = crop_size[0], crop_size[1]
dy = (tf.shape(image)[0] - h) // 2
dx = (tf.shape(image)[1] - w) // 2
return tf.image.crop_to_bounding_box(image, dy, dx, h, w)
return _crop
@Registry.register("preprocess_ops.flip_lr")
@utils.InKeyOutKey()
def get_random_flip_lr():
"""Flips an image horizontally with probability 50%."""
def _random_flip_lr_pp(image):
return tf.image.random_flip_left_right(image)
return _random_flip_lr_pp
@Registry.register("preprocess_ops.vgg_value_range")
@utils.InKeyOutKey()
def get_vgg_value_range(
mean=(0.485 * 255, 0.456 * 255, 0.406 * 255),
std=(0.229 * 255, 0.224 * 255, 0.225 * 255)
):
"""VGG-style preprocessing, subtracts mean and divides by stddev.
This preprocessing is very common for ImageNet pre-trained models since VGG,
and to this day the standard for models coming from most PyTorch codes.
Args:
mean: Tuple of values to be subtracted.
std: Tuple of values to be divided by.
Returns:
A function to rescale the values.
"""
mean = tf.constant(mean, tf.float32)
std = tf.constant(std, tf.float32)
def _vgg_value_range(image):
return (tf.cast(image, tf.float32) - mean) / std
return _vgg_value_range
@Registry.register("preprocess_ops.randaug")
@utils.InKeyOutKey()
def get_randaug(num_layers: int = 2, magnitude: int = 10):
"""Creates a function that applies RandAugment.
RandAugment is from the paper https://arxiv.org/abs/1909.13719,
Args:
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
a function that applies RandAugment.
"""
def _randaug(image):
return autoaugment.distort_image_with_randaugment(
image, num_layers, magnitude)
return _randaug
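# Illustration only (not part of the original file): every op above is wrapped
# by utils.InKeyOutKey, so it also accepts `inkey`/`outkey` keyword arguments
# to operate on non-default dictionary keys.
def _example_ops_image_keys():
  op = get_resize([8, 8], inkey="image", outkey="thumb")  # pylint: disable=unexpected-keyword-arg
  data = op({"image": tf.zeros([32, 32, 3], tf.uint8)})
  assert tuple(data["thumb"].shape) == (8, 8, 3)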
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from unittest import mock
from absl.testing import absltest
from big_vision.pp import registry
class RegistryTest(absltest.TestCase):
def setUp(self):
super(RegistryTest, self).setUp()
# Mock global registry in each test to keep them isolated and allow for
# concurrent tests.
self.addCleanup(mock.patch.stopall)
self.global_registry = dict()
self.mocked_method = mock.patch.object(
registry.Registry, "global_registry",
return_value=self.global_registry).start()
def test_parse_name(self):
name, args, kwargs = registry.parse_name("f")
self.assertEqual(name, "f")
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
name, args, kwargs = registry.parse_name("f()")
self.assertEqual(name, "f")
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
name, args, kwargs = registry.parse_name("func(a=0,b=1,c='s')")
self.assertEqual(name, "func")
self.assertEqual(args, ())
self.assertEqual(kwargs, {"a": 0, "b": 1, "c": "s"})
name, args, kwargs = registry.parse_name("func(1,'foo',3)")
self.assertEqual(name, "func")
self.assertEqual(args, (1, "foo", 3))
self.assertEqual(kwargs, {})
name, args, kwargs = registry.parse_name("func(1,'2',a=3,foo='bar')")
self.assertEqual(name, "func")
self.assertEqual(args, (1, "2"))
self.assertEqual(kwargs, {"a": 3, "foo": "bar"})
name, args, kwargs = registry.parse_name("foo.bar.func(a=0,b=(1),c='s')")
self.assertEqual(name, "foo.bar.func")
self.assertEqual(kwargs, dict(a=0, b=1, c="s"))
with self.assertRaises(SyntaxError):
registry.parse_name("func(0")
with self.assertRaises(SyntaxError):
registry.parse_name("func(a=0,,b=0)")
with self.assertRaises(SyntaxError):
registry.parse_name("func(a=0,b==1,c='s')")
with self.assertRaises(ValueError):
registry.parse_name("func(a=0,b=undefined_name,c='s')")
def test_register(self):
# pylint: disable=unused-variable
@registry.Registry.register("func1")
def func1():
pass
self.assertLen(registry.Registry.global_registry(), 1)
def test_lookup_function(self):
@registry.Registry.register("func1")
def func1(arg1, arg2, arg3): # pylint: disable=unused-variable
return arg1, arg2, arg3
self.assertTrue(callable(registry.Registry.lookup("func1")))
self.assertEqual(registry.Registry.lookup("func1")(1, 2, 3), (1, 2, 3))
self.assertEqual(
registry.Registry.lookup("func1(arg3=9)")(1, 2), (1, 2, 9))
self.assertEqual(
registry.Registry.lookup("func1(arg2=9,arg1=99)")(arg3=3), (99, 9, 3))
self.assertEqual(
registry.Registry.lookup("func1(arg2=9,arg1=99)")(arg1=1, arg3=3),
(1, 9, 3))
self.assertEqual(
registry.Registry.lookup("func1(1)")(1, 2), (1, 1, 2))
self.assertEqual(
registry.Registry.lookup("func1(1)")(arg3=3, arg2=2), (1, 2, 3))
self.assertEqual(
registry.Registry.lookup("func1(1, 2)")(3), (1, 2, 3))
self.assertEqual(
registry.Registry.lookup("func1(1, 2)")(arg3=3), (1, 2, 3))
self.assertEqual(
registry.Registry.lookup("func1(1, arg2=2)")(arg3=3), (1, 2, 3))
self.assertEqual(
registry.Registry.lookup("func1(1, arg3=2)")(arg2=3), (1, 3, 2))
self.assertEqual(
registry.Registry.lookup("func1(1, arg3=2)")(3), (1, 3, 2))
with self.assertRaises(TypeError):
registry.Registry.lookup("func1(1, arg2=2)")(3)
with self.assertRaises(TypeError):
registry.Registry.lookup("func1(1, arg3=3)")(arg3=3)
with self.assertRaises(TypeError):
registry.Registry.lookup("func1(1, arg3=3)")(arg1=3)
with self.assertRaises(SyntaxError):
registry.Registry.lookup("func1(arg1=1, 3)")(arg2=3)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing utils."""
from collections import abc
def maybe_repeat(arg, n_reps):
if not isinstance(arg, abc.Sequence):
arg = (arg,) * n_reps
return arg
class InKeyOutKey(object):
"""Decorator for preprocessing ops, which adds `inkey` and `outkey` arguments.
Note: Only supports single-input single-output ops.
"""
def __init__(self, indefault="image", outdefault="image", with_data=False):
self.indefault = indefault
self.outdefault = outdefault
self.with_data = with_data
def __call__(self, orig_get_pp_fn):
def get_ikok_pp_fn(*args, key=None,
inkey=self.indefault, outkey=self.outdefault, **kw):
orig_pp_fn = orig_get_pp_fn(*args, **kw)
def _ikok_pp_fn(data):
# Optionally allow the function to get the full data dict as aux input.
if self.with_data:
data[key or outkey] = orig_pp_fn(data[key or inkey], data=data)
else:
data[key or outkey] = orig_pp_fn(data[key or inkey])
return data
return _ikok_pp_fn
return get_ikok_pp_fn
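# A minimal sketch of InKeyOutKey in action (illustration, not part of the
# original file): the decorated factory gains `inkey`/`outkey` kwargs.
def _example_inkeyoutkey():
  @InKeyOutKey(indefault="image", outdefault="image")
  def get_double():
    return lambda x: 2 * x
  op = get_double(inkey="a", outkey="b")  # pylint: disable=unexpected-keyword-arg
  assert op({"a": 3}) == {"a": 3, "b": 6}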
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
This code is forked from
https://github.com/tensorflow/tpu/blob/11d0db15cf1c3667f6e36fecffa111399e008acd/models/official/efficientnet/autoaugment.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dataclasses
import inspect
import math
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as contrib_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
@dataclasses.dataclass
class HParams:
"""Parameters for AutoAugment and RandAugment."""
cutout_const: int
translate_const: int
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
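# Worked example of blend semantics (sketch, not part of the original file):
# factor=0.5 interpolates halfway between the images; factor=1.5 extrapolates
# past image2 and is then clipped to [0, 255].
def _example_blend():
  image1 = tf.zeros([1, 1, 3], tf.uint8)       # pixel value 0
  image2 = tf.ones([1, 1, 3], tf.uint8) * 200  # pixel value 200
  half = blend(image1, image2, 0.5)   # -> 100 everywhere
  extra = blend(image1, image2, 1.5)  # 0 + 1.5*200 = 300 -> clipped to 255
  return half, extra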
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `img`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask applied to the image will be.
      The mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
# Some augmentation that uses depth-wise conv will cause crashing when
# training on GPU. See ((internal link)) for details.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for a given spatial position, the
remaining three channels at that position are grayed (set to 128).
Operations like translate and shear on a wrapped Tensor will leave 0s in
empty locations. Some transformations look at the intensity of values to
do preprocessing, and we want these empty pixels to assume the 'average'
value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
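# Illustrative sketch (not part of the original pipeline): wrap/unwrap is
# meant to bracket geometric ops that leave zeros in vacated pixels, e.g.:
#   wrapped = wrap(image)                       # append an all-ones channel
#   shifted = some_translate_or_shear(wrapped)  # vacated pixels become zeros
#   image = unwrap(shifted, [128, 128, 128])    # fill vacated pixels w/ gray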
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` passed into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
# Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
augmentation_name: The name of the AutoAugment policy to use. The available
options here are `v0` and `test`. `v0` is the policy used for all of the
results in the paper and was found to achieve the best results on the COCO
dataset. (The paper also describes `v1`, `v2` and `v3` policies, which vary
slightly in which operations were used during the search procedure and in
how many operations are applied in parallel to a single image (2 vs 3),
but they are not registered in this module.)
Returns:
The augmented version of `image`.
"""
available_policies = {'v0': policy_v0,
'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = HParams(
cutout_const=100, translate_const=250)
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
def distort_image_with_randaugment(image, num_layers, magnitude):
"""Applies the RandAugment policy to `image`.
RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
tf.logging.info('Using RandAug.')
augmentation_hparams = HParams(
cutout_const=40, translate_const=100)
available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
# pylint:disable=g-long-lambda
image = tf.cond(
tf.equal(i, op_to_select),
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args),
# pylint:enable=g-long-lambda
lambda: image)
return image
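# Example usage (a minimal sketch, assuming the TF1-style setup this module
# targets and an RGB uint8 image tensor):
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   aa_image = distort_image_with_autoaugment(image, 'v0')
#   ra_image = distort_image_with_randaugment(image, num_layers=2, magnitude=10)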
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ops_text."""
import big_vision.pp.ops_text as pp
import tensorflow as tf
class PpOpsTest(tf.test.TestCase):
def test_get_pp_clip_i1k_label_names(self):
op = pp.get_pp_clip_i1k_label_names()
labels = op({"label": tf.constant([0, 1])})["labels"].numpy().tolist()
self.assertAllEqual(labels, ["tench", "goldfish"])
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic tensor preprocessing ops.
All preprocessing ops should return a data-processing functor. A data point
is represented as a dictionary of (TF) tensors. The functors take and output
a modified dictionary.
"""
from big_vision.pp import utils
from big_vision.pp.registry import Registry
import big_vision.utils as bv_utils
import jax
import numpy as np
import tensorflow as tf
@Registry.register("preprocess_ops.value_range")
@utils.InKeyOutKey()
def get_value_range(vmin=-1, vmax=1, in_min=0, in_max=255.0, clip_values=False):
"""Transforms a [in_min,in_max] image to [vmin,vmax] range.
Input ranges in_min/in_max can be equal-size lists to rescale the individual
channels independently.
Args:
vmin: A scalar. Output min value.
vmax: A scalar. Output max value.
in_min: A scalar or a list of input min values to scale. If a list, the
length should match the number of channels in the image.
in_max: A scalar or a list of input max values to scale. If a list, the
length should match the number of channels in the image.
clip_values: Whether to clip the output values to the provided ranges.
Returns:
A function to rescale the values.
"""
def _value_range(image):
"""Scales values in given range."""
in_min_t = tf.constant(in_min, tf.float32)
in_max_t = tf.constant(in_max, tf.float32)
image = tf.cast(image, tf.float32)
image = (image - in_min_t) / (in_max_t - in_min_t)
image = vmin + image * (vmax - vmin)
if clip_values:
image = tf.clip_by_value(image, vmin, vmax)
return image
return _value_range
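# Example (a sketch; in practice the op is referenced via a pp string such as
# "value_range(-1, 1)"). Assuming InKeyOutKey defaults to the "image" key:
#   op = get_value_range(-1, 1)
#   data = op({"image": tf.fill([2, 2, 3], tf.constant(255, tf.uint8))})
#   # data["image"] is float32, all values equal to 1.0.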
@Registry.register("preprocess_ops.lookup")
@utils.InKeyOutKey()
def get_lookup(mapping, npzkey="fnames", sep=None):
"""Map string to number."""
# For NumPy files, we use the `npzkey` array in that file as the list of
# strings which are mapped to their index in that array.
# This is especially useful when other data (eg precomputed predictions)
# goes along with this mapping, to have everything in one place (the npz).
if mapping.endswith(".npz"):
with tf.io.gfile.GFile(mapping, "rb") as f:
keys = np.array(np.load(f, allow_pickle=False)[npzkey])
vals = np.arange(len(keys))
# Otherwise, we simply use the file as a text file, with either of:
# - a string per line, mapped to its line-number
# - a pair, separated by `sep` per line, first value being the string, second
# value being the integer that the string is mapped to.
else:
with tf.io.gfile.GFile(mapping, "r") as f:
buf = f.read()
if sep is None: # values are the line numbers
keys = buf.splitlines()
vals = np.arange(len(keys))
else: # each line is key<sep>val, also make val int
keys, vals = zip(*[l.split(sep) for l in buf.splitlines()])
vals = [int(v) for v in vals]
def _do_the_mapping(needle):
"""Map string to number."""
with tf.init_scope(): # (Originally added for performance reasons.)
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys, vals), -1)
return table.lookup(needle)
return _do_the_mapping
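# Example (a sketch; "classes.txt" is a hypothetical mapping file with two
# lines, "cat" and "dog"):
#   op = get_lookup("/tmp/classes.txt", key="label")
#   out = op({"label": tf.constant("dog")})  # out["label"] == 1
#   # Strings not in the mapping resolve to the table default, -1.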
@Registry.register("preprocess_ops.onehot")
def get_onehot(depth,
key="labels",
key_result=None,
multi=True,
on=1.0,
off=0.0):
"""One-hot encodes the input.
Args:
depth: Length of the one-hot vector (how many classes).
key: Key of the data to be one-hot encoded.
key_result: Key under which to store the result (same as `key` if None).
multi: If there are multiple labels, whether to merge them into the same
"multi-hot" vector (True) or keep them as an extra dimension (False).
on: Value to fill in for the positive label (default: 1).
off: Value to fill in for negative labels (default: 0).
Returns:
Data dictionary.
"""
def _onehot(data):
# When there's more than one label, this is significantly more efficient
# than using tf.one_hot followed by tf.reduce_max; we tested.
labels = data[key]
labels = tf.cast(labels, tf.int64) # both scatter and one_hot expect this
if labels.shape.rank > 0 and multi:
x = tf.scatter_nd(labels[:, None], tf.ones(tf.shape(labels)[0]), (depth,))
x = tf.clip_by_value(x, 0, 1) * (on - off) + off
else:
x = tf.one_hot(labels, depth, on_value=on, off_value=off)
data[key_result or key] = x
return data
return _onehot
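# Example (a sketch): with multi=True, a list of labels becomes a single
# multi-hot vector.
#   op = get_onehot(4)
#   out = op({"labels": tf.constant([0, 2])})
#   # out["labels"] == [1., 0., 1., 0.]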
@Registry.register("preprocess_ops.keep")
def get_keep(*keys):
"""Keeps only the given keys."""
def _keep(data):
return {k: v for k, v in data.items() if k in keys}
return _keep
@Registry.register("preprocess_ops.drop")
def get_drop(*keys):
"""Drops the given keys."""
def _drop(data):
return {k: v for k, v in data.items() if k not in keys}
return _drop
@Registry.register("preprocess_ops.copy")
def get_copy(inkey, outkey):
"""Copies value of `inkey` into `outkey`."""
def _copy(data):
# A "semi-deep" copy. deepcopy doesn't work when tf tensors are part of the
# game. What we want, is to only copy the python structure (dicts, lists)
# and keep tensors as they are, since we never modify them in-place anyways.
# The following achieves exactly that.
data[outkey] = jax.tree_map(lambda x: x, data[inkey])
return data
return _copy
@Registry.register("preprocess_ops.squeeze_last_dim")
@utils.InKeyOutKey()
def get_squeeze_last_dim():
def _squeeze_last_dim(x):
return tf.squeeze(x, axis=-1)
return _squeeze_last_dim
@Registry.register("preprocess_ops.concat")
def get_concat(inkeys, outkey=None, axis=-1):
"""Concatenates elements along some axis."""
def _concat(data):
data[outkey or inkeys[0]] = tf.concat([data[k] for k in inkeys], axis)
return data
return _concat
@Registry.register("preprocess_ops.rag_tensor")
@utils.InKeyOutKey()
def get_rag_tensor():
"""Converts the specified feature to ragged tensor."""
def rag_tensor(raw_tensor):
# Note: Add one more dimension as `from_tensor` requires at least rank 2.
return tf.RaggedTensor.from_tensor(raw_tensor[None])
return rag_tensor
@Registry.register("preprocess_ops.pad_to_shape")
@utils.InKeyOutKey()
def get_pad_to_shape(shape, pad_value=0):
"""Pads tensor to specified `shape`."""
def _pad_to_shape(x):
assert len(x.shape.as_list()) == len(shape)
paddings = [[0, shape[i] - tf.shape(x)[i]] for i in range(len(shape))]
constant_value = tf.constant(pad_value, x.dtype)
ret = tf.pad(x, paddings, constant_values=constant_value)
ret.set_shape(shape)
return ret
return _pad_to_shape
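# Example (a sketch): pad a variable-length vector up to a fixed shape.
#   op = get_pad_to_shape((5,), pad_value=0, key="tokens")
#   out = op({"tokens": tf.constant([3, 1, 4])})
#   # out["tokens"] == [3, 1, 4, 0, 0]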
@Registry.register("preprocess_ops.flatten")
def get_flatten():
"""Flattens the keys of data with separator '/'."""
def flatten(data):
flat, _ = bv_utils.tree_flatten_with_names(data)
return dict(flat)
return flatten
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from big_vision.pp import builder
from big_vision.pp import ops_general # pylint: disable=unused-import
from big_vision.pp import ops_image # pylint: disable=unused-import
import numpy as np
import tensorflow.compat.v1 as tf
class BuilderTest(tf.test.TestCase):
def testSingle(self):
pp_fn = builder.get_preprocess_fn("resize(256)")
x = np.random.randint(0, 256, [640, 480, 3])
image = pp_fn({"image": x})["image"]
self.assertEqual(image.numpy().shape, (256, 256, 3))
def testEmpty(self):
pp_fn = builder.get_preprocess_fn("||inception_crop|||resize(256)||")
# Typical image input
x = np.random.randint(0, 256, [640, 480, 3])
image = pp_fn({"image": x})["image"]
self.assertEqual(image.numpy().shape, (256, 256, 3))
def testPreprocessingPipeline(self):
pp_str = ("inception_crop|resize(256)|resize((256, 256))|"
"central_crop((80, 120))|flip_lr|value_range(0,1)|"
"value_range(-1,1)")
pp_fn = builder.get_preprocess_fn(pp_str)
# Typical image input
x = np.random.randint(0, 256, [640, 480, 3])
image = pp_fn({"image": x})["image"]
self.assertEqual(image.numpy().shape, (80, 120, 3))
self.assertLessEqual(np.max(image.numpy()), 1)
self.assertGreaterEqual(np.min(image.numpy()), -1)
def testNumArgsException(self):
x = np.random.randint(0, 256, [640, 480, 3])
for pp_str in [
"inception_crop(1)",
"resize()",
"resize(1, 1, 1)"
"flip_lr(1)",
"central_crop()",
]:
with self.assertRaises(BaseException):
builder.get_preprocess_fn(pp_str)(x)
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing functions for CLIP with Pixels Only (CLIPPO)."""
from absl import logging
from big_vision.pp import utils
from big_vision.pp.registry import Registry
import numpy as np
import tensorflow as tf
@Registry.register("preprocess_ops.render_unifont")
@utils.InKeyOutKey(indefault="texts", outdefault="image")
def get_pp_render_text(image_size: int, font_size: int = 16, max_chars=768,
background_brightness=127, text_brightness=0,
lower=True, monospace=False, spacing=1, min_width=4,
resize_method="area"):
"""Renders text as image, using binary Unifont.
Largely based on Jeffrey Sorensen's text rendering implementation.
Args:
image_size: Width/height of output image.
font_size: Font size to use. Recommended to leave at 16, as this requires
no resizing, and is safe.
max_chars: Maximum number of input characters to render, to make it faster.
background_brightness: Brightness (0-255) of background pixels.
text_brightness: Brightness (0-255) of text pixels.
lower: whether to lowercase.
monospace: if False, text characters are horizontally trimmed according to
`spacing` and `min_width` args.
spacing: Number of pixels between each letter.
min_width: Minimum width of each letter. Useful to make sure e.g. spaces and
full stops aren't collapsed to nothing.
resize_method: Resize method to use if font_size != 16.
Returns:
Function which renders text as an image.
"""
bit_embedding = np.zeros((0x200000, 32), dtype=np.uint8)
colpattern = {64: range(32),
32: sorted(tuple(range(0, 32, 4)) + tuple(range(2, 32, 4)))}
unifont_path = "big_vision/pp/proj/clippo/unifont-9.0.06.hex"
unifont_upper_path = "big_vision/pp/proj/clippo/unifont_upper-9.0.06.hex"
with tf.io.gfile.GFile(unifont_path) as f:
for line in f:
row = int(line[0:4], 16)
hexbits = line[5:-1]
bit_embedding[row, colpattern[len(hexbits)]] = bytearray.fromhex(hexbits)
with tf.io.gfile.GFile(unifont_upper_path) as f:
for line in f:
row = int(line[0:6], 16)
hexbits = line[7:-1]
bit_embedding[row, colpattern[len(hexbits)]] = bytearray.fromhex(hexbits)
params = tf.constant(bit_embedding, dtype=tf.uint8)
def trim_letter(letter):
"""Remove white space based on the letter size."""
v = tf.reduce_max(letter, axis=0)
has_pixels = tf.reshape(tf.where(v), (-1,), name="RS5")
no_pixels = tf.equal(tf.reduce_max(v), 0)
first = tf.cond(no_pixels, lambda: tf.constant(0, tf.int64),
lambda: has_pixels[0])
last = tf.cond(no_pixels, lambda: tf.constant(0, tf.int64),
lambda: has_pixels[-1])
first = tf.maximum(first - spacing, 0)
last = tf.maximum(last + spacing, first + min_width)
return tf.RaggedTensor.from_tensor(tf.transpose(letter[:, first:last]))
def to_image(rendered, width, height=None):
"""Makes a nice square image from a long string of rendered charcaters."""
height = height or width
max_letter_width = tf.reduce_max(rendered.row_lengths(1))
row_lengths = tf.cast(tf.cumsum(rendered.row_lengths(1)), tf.float32)
div = tf.cast(width - max_letter_width, tf.float32) # For rounding errors.
row_idx = tf.cast(tf.floor(row_lengths / div), tf.int64)
row_idx = tf.RaggedTensor.from_value_rowids(tf.range(tf.shape(rendered)[0]),
row_idx)
trimmed = tf.gather(rendered, row_idx, axis=0)
trimmed = trimmed.merge_dims(1, 2)
trimmed = trimmed.to_tensor(default_value=0)
trimmed = tf.transpose(trimmed, (0, 2, 1))
trimmed = tf.reshape(trimmed, (-1, tf.shape(trimmed)[-1]), name="RS4")
trimmed = trimmed[:height]
wpad = width - tf.shape(trimmed)[1]
hpad = height - tf.shape(trimmed)[0]
padded = tf.pad(trimmed, [[0, hpad], [0, wpad]])
tf.assert_equal(tf.shape(padded), tf.constant((height, width)))
return tf.ensure_shape(padded, (width, height))
def render(text):
if lower:
text = tf.strings.lower(text)
text = tf.reshape(text, (-1,))[0]
ids = tf.strings.unicode_decode(text, "UTF-8")
if max_chars:
ids = ids[:max_chars]
embed = tf.nn.embedding_lookup(params, ids) # Get the letters
# Each letter is 32 uint8s, but we want binary 16x16 grid.
# The following does that in a rather hard to parse way.
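# Roughly: each byte is replicated 8 times, `index & 7` gives each copy's
# bit position within its byte, and right-shifting by `position ^ 7`
# (i.e. 7 - position) then masking with 1 unpacks the bits MSB-first.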
vertical = tf.reshape(embed, [1, -1])
repl = tf.reshape(tf.transpose(tf.tile(vertical, multiples=[8, 1])), [-1])
ones = tf.ones_like(repl)
index = tf.cumsum(ones, exclusive=True)
sevens = tf.cast(tf.fill(tf.shape(repl), 7), tf.uint8)
moded = tf.bitwise.bitwise_and(index, sevens)
shifted = tf.bitwise.right_shift(repl,
tf.bitwise.bitwise_xor(moded, sevens))
anded = tf.bitwise.bitwise_and(shifted, ones)
# And finally, letters; binary, 0 = background, 1 = letter.
letters = tf.reshape(anded, [tf.shape(ids)[0], 16, 16])
if font_size != 16:
logging.warning("The unifont text rendering function is highly optimized "
"for font size 16; using font size %i might lead to "
"suboptimal rendering and might degrade performance.",
font_size)
letters = tf.image.resize(letters[..., None], (font_size, font_size),
method=resize_method, antialias=True)
letters = tf.squeeze(letters, axis=-1)
if monospace:
letters = tf.RaggedTensor.from_tensor(tf.transpose(letters, (0, 2, 1)))
else:
letters = tf.RaggedTensor.from_tensor(letters)
signature = tf.RaggedTensorSpec(shape=(None, font_size), ragged_rank=1,
dtype=letters.dtype)
letters = tf.map_fn(trim_letter, letters, fn_output_signature=signature)
img = to_image(letters, image_size)[..., None] # A nice square image.
img *= (text_brightness - background_brightness) # Rescale value range.
img += background_brightness
return tf.image.grayscale_to_rgb(tf.cast(img, tf.uint8))
return render
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT-related preprocessing ops (using WordPiece tokenizer)."""
from big_vision.pp import utils
from big_vision.pp.registry import Registry
import tensorflow as tf
import tensorflow_text
# Internally using
# BasicTokenizer
# https://github.com/tensorflow/text/blob/df5250d6cf1069990df4bf55154867391ab5381a/tensorflow_text/python/ops/bert_tokenizer.py#L67
# WordpieceTokenizer
# https://github.com/tensorflow/text/blob/master/tensorflow_text/python/ops/wordpiece_tokenizer.py
def _create_bert_tokenizer(vocab_path):
with tf.io.gfile.GFile(vocab_path) as f:
vocab = f.read().split("\n")
cls_token = vocab.index("[CLS]")
return cls_token, tensorflow_text.BertTokenizer(
vocab_path,
token_out_type=tf.int32,
lower_case=True,
)
@Registry.register("preprocess_ops.bert_tokenize")
@utils.InKeyOutKey(indefault=None, outdefault="labels")
def get_pp_bert_tokenize(vocab_path, max_len, sample_if_multi=True):
"""Extracts tokens with tensorflow_text.BertTokenizer.
Args:
vocab_path: Path to a file containing the vocabulary for the WordPiece
tokenizer. It's the "vocab.txt" file in the zip file downloaded from
the original repo https://github.com/google-research/bert
max_len: Number of tokens after tokenization.
sample_if_multi: If `True`, tokenize one text sampled uniformly at random
when multiple are present; if `False`, always take the first text.
Returns:
A preprocessing Op.
"""
cls_token, tokenizer = _create_bert_tokenizer(vocab_path)
def _pp_bert_tokenize(labels):
labels = tf.reshape(labels, (-1,))
labels = tf.concat([labels, [""]], axis=0)
if sample_if_multi:
num_texts = tf.maximum(tf.shape(labels)[0] - 1, 1) # Don't sample "".
txt = labels[tf.random.uniform([], 0, num_texts, dtype=tf.int32)]
else:
txt = labels[0] # Always works, since we append "" earlier on.
token_ids = tokenizer.tokenize(txt[None])
padded_token_ids, mask = tensorflow_text.pad_model_inputs(
token_ids, max_len - 1)
del mask # Recovered from zero padding in model.
count = tf.shape(padded_token_ids)[0]
padded_token_ids = tf.concat(
[tf.fill([count, 1], cls_token), padded_token_ids], axis=1)
return padded_token_ids[0]
return _pp_bert_tokenize
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bert_ops."""
import tempfile
from big_vision import input_pipeline
import big_vision.pp.builder as pp_builder
import big_vision.pp.ops_general # pylint: disable=unused-import
from big_vision.pp.proj.flaxformer import bert_ops # pylint: disable=unused-import
import tensorflow as tf
# BERT vocabulary for testing.
_BERT_VOCAB = [
"[PAD]",
"[UNK]",
"more",
"than",
"one",
"[CLS]",
"[SEP]",
]
def _create_ds(pp_str, tensor_slices, num_examples, remove_tpu_dtypes):
return input_pipeline.make_for_inference(
tf.data.Dataset.from_tensor_slices(tensor_slices),
num_ex_per_process=[num_examples],
preprocess_fn=pp_builder.get_preprocess_fn(
pp_str, remove_tpu_dtypes=remove_tpu_dtypes),
batch_size=num_examples,
)[0]
class BertOpsTest(tf.test.TestCase):
def test_tokenize(self):
inkey = "texts"
vocab_path = f"{tempfile.mkdtemp()}/vocab.txt"
with open(vocab_path, "w") as f:
f.write("\n".join(_BERT_VOCAB))
pp_str = (
f"bert_tokenize(inkey='{inkey}', vocab_path='{vocab_path}', "
f"max_len=5)"
)
tensor_slices = {
inkey: tf.ragged.constant([["one more"], ["more than one"], [""]])
}
ds = _create_ds(pp_str, tensor_slices, 3, True)
self.assertAllEqual(
next(iter(ds))["labels"],
[[5, 4, 2, 0, 0], [5, 2, 3, 4, 0], [5, 0, 0, 0, 0]],
)
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops."""
from big_vision.pp import utils
from big_vision.pp.registry import Registry
import numpy as np
import tensorflow as tf
@Registry.register("preprocess_ops.rgb_to_grayscale_to_rgb")
@utils.InKeyOutKey(indefault="image", outdefault="image")
def get_rgb_to_grayscale_to_rgb():
def _rgb_to_grayscale_to_rgb(image):
return tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return _rgb_to_grayscale_to_rgb
@Registry.register("preprocess_ops.nyu_eval_crop")
def get_nyu_eval_crop():
"""Crops labels and image to valid eval area."""
# crop_h = slice(45, 471)
# crop_w = slice(41, 601)
crop_h_start = 54
crop_h_size = 426
crop_w_start = 41
crop_w_size = 560
def _pp(data):
tf.debugging.assert_equal(tf.shape(data["labels"]), (480, 640, 1))
tf.debugging.assert_equal(tf.shape(data["image"]), (480, 640, 3))
data["labels"] = tf.slice(data["labels"],
[crop_h_start, crop_w_start, 0],
[crop_h_size, crop_w_size, -1])
data["image"] = tf.slice(data["image"],
[crop_h_start, crop_w_start, 0],
[crop_h_size, crop_w_size, -1])
return data
return _pp
@Registry.register("preprocess_ops.nyu_depth")
@utils.InKeyOutKey(indefault="depth", outdefault="labels")
def get_nyu_depth():
"""Preprocesses NYU depth data."""
def _pp(depth):
return tf.expand_dims(tf.cast(depth, tf.float32), -1)
return _pp
@Registry.register("preprocess_ops.coco_panoptic")
def get_coco_panoptic_pp():
"""COCO-panoptic: produces a mask with labels and a mask with instance ids.
Instance channel will have values between 1 and N, and -1 for non-annotated
pixels.
Returns:
COCO panoptic preprocessing op.
"""
def _coco_panoptic(data):
instance_ids = tf.cast(data["panoptic_objects"]["id"], tf.int32)
instance_labels = tf.cast(data["panoptic_objects"]["label"], tf.int32)
# Convert image with ids split in 3 channels into an integer id.
id_mask = tf.einsum(
"hwc,c->hw",
tf.cast(data["panoptic_image"], tf.int32),
tf.constant([1, 256, 256**2], tf.int32))
# Broadcast into N boolean masks one per instance_id.
n_masks = tf.cast(
id_mask[:, :, None] == instance_ids[None, None, :], tf.int32)
# Merge into a semantic and an instance id mask.
# Note: pixels which do not belong to any mask, will have value=-1
# which creates an empty one_hot masks.
# Number instances starting at 1 (0 is treated specially by make_canonical).
instance_idx = tf.range(tf.shape(instance_ids)[-1])
instances = tf.einsum("hwc,c->hw", n_masks, instance_idx + 1)
semantics = tf.einsum("hwc,c->hw", n_masks, instance_labels + 1)
data["instances"] = instances[:, :, None]
data["semantics"] = semantics[:, :, None]
return data
return _coco_panoptic
@Registry.register("preprocess_ops.make_canonical")
@utils.InKeyOutKey(indefault="labels", outdefault="labels")
def get_make_canonical(random=False, main_sort_axis="y"):
"""Makes id mask ordered from left to right based on the center of mass."""
# By convention, instances are in the last channel.
def _make_canonical(image):
"""Op."""
instimg = image[..., -1]
# Compute binary instance masks. Note, we do not touch 0 and neg. ids.
ids = tf.unique(tf.reshape(instimg, [-1])).y
ids = ids[ids > 0]
n_masks = tf.cast(
instimg[None, :, :] == ids[:, None, None], tf.int32)
if not random:
f = lambda x: tf.reduce_mean(tf.cast(tf.where(x), tf.float32), axis=0)
centers = tf.map_fn(f, tf.cast(n_masks, tf.int64), dtype=tf.float32)
centers = tf.reshape(centers, (tf.shape(centers)[0], 2))
major = {"y": 0, "x": 1}[main_sort_axis]
perm = tf.argsort(
centers[:, 1 - major] +
tf.cast(tf.shape(instimg)[major], tf.float32) * centers[:, major])
n_masks = tf.gather(n_masks, perm)
else:
n_masks = tf.random.shuffle(n_masks)
idx = tf.range(tf.shape(ids)[0])
can_mask = tf.einsum("chw,c->hw", n_masks, idx + 2) - 1
# Now, all 0 and neg. ids have collapsed to -1. Thus, we recover 0 id from
# the original mask.
can_mask = tf.where(instimg == 0, 0, can_mask)
return tf.concat([image[..., :-1], can_mask[..., None]], axis=-1)
return _make_canonical
@Registry.register("preprocess_ops.inception_box")
def get_inception_box(
*, area=(0.05, 1.0), aspect=(0.75, 1.33), min_obj_cover=0.0,
outkey="box", inkey="image"):
"""Creates an inception style bounding box which can be used to crop."""
def _inception_box(data):
_, _, box = tf.image.sample_distorted_bounding_box(
tf.shape(data[inkey]),
area_range=area,
aspect_ratio_range=aspect,
min_object_covered=min_obj_cover,
bounding_boxes=(data["objects"]["bbox"][None, :, :]
if min_obj_cover else tf.zeros([0, 0, 4])),
use_image_if_no_bounding_boxes=True)
# bbox is [[[y0,x0,y1,x1]]]
data[outkey] = (box[0, 0, :2], box[0, 0, 2:] - box[0, 0, :2])
return data
return _inception_box
@Registry.register("preprocess_ops.crop_box")
@utils.InKeyOutKey(with_data=True)
def get_crop_box(*, boxkey="box"):
"""Crops an image according to bounding box in `boxkey`."""
def _crop_box(image, data):
shape = tf.shape(image)[:-1]
begin, size = data[boxkey]
begin = tf.cast(begin * tf.cast(shape, tf.float32), tf.int32)
size = tf.cast(size * tf.cast(shape, tf.float32), tf.int32)
begin = tf.concat([begin, tf.constant((0,))], axis=0)
size = tf.concat([size, tf.constant((-1,))], axis=0)
crop = tf.slice(image, begin, size)
# Unfortunately, the above operation loses the depth-dimension. So we need
# to restore it the manual way.
crop.set_shape([None, None, image.shape[-1]])
return crop
return _crop_box
@Registry.register("preprocess_ops.randu")
def get_randu(key):
"""Creates a random uniform float [0, 1) in `key`."""
def _randu(data):
data[key] = tf.random.uniform([])
return data
return _randu
@Registry.register("preprocess_ops.det_fliplr")
@utils.InKeyOutKey(with_data=True)
def get_det_fliplr(*, randkey="fliplr"):
"""Flips an image horizontally based on `randkey`."""
# NOTE: we could unify this with regular flip when randkey=None.
def _det_fliplr(orig_image, data):
flip_image = tf.image.flip_left_right(orig_image)
flip = tf.cast(data[randkey] > 0.5, orig_image.dtype)
return flip_image * flip + orig_image * (1 - flip)
return _det_fliplr
@Registry.register("preprocess_ops.strong_hash")
@utils.InKeyOutKey(indefault="tfds_id", outdefault="tfds_id")
def get_strong_hash():
"""Preprocessing that hashes a string."""
def _strong_hash(string):
return tf.strings.to_hash_bucket_strong(
string,
np.iinfo(int).max, [3714561454027272724, 8800639020734831960])
return _strong_hash
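# Example (a sketch): hashing the stable "tfds_id" gives a deterministic
# int64 bucket, usable e.g. for example-level holdout splits:
#   op = get_strong_hash()
#   out = op({"tfds_id": tf.constant("some-id")})
#   # out["tfds_id"] is the same integer on every run.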
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pp_ops."""
import copy
from big_vision.pp.proj.uvim import pp_ops as pp
import numpy as np
import tensorflow as tf
def get_image_data(dtype=tf.uint8):
img = tf.random.uniform((640, 320, 3), 0, 255, tf.int32) # Can't ask uint8!?
return {"image": tf.cast(img, dtype)}
class PreprocessOpsTest(tf.test.TestCase):
def tfrun(self, ppfn, data={}): # pylint: disable=dangerous-default-value
# Run once as standalone, as could happen eg in colab.
yield {k: np.array(v) for k, v in ppfn(copy.deepcopy(data)).items()}
if not data:  # tf.data doesn't like a completely empty dict...
data = {"dummy": 0.0}
# And then once again as part of tfdata pipeline.
# You'd be surprised how much these two differ!
tfdata = tf.data.Dataset.from_tensors(copy.deepcopy(data))
for npdata in tfdata.map(ppfn).as_numpy_iterator():
yield npdata
def test_randu(self):
for output in self.tfrun(pp.get_randu("flip")):
self.assertEqual(output["flip"].shape, ())
self.assertAllGreaterEqual(output["flip"], 0.0)
self.assertAllLessEqual(output["flip"], 1.0)
def test_det_flip_lr(self):
# Test both dtypes to make sure it can be applied correctly to both.
for dtype in [tf.uint8, tf.float32]:
image_data = get_image_data(dtype)
for out in self.tfrun(pp.get_det_fliplr(randkey="rand"),
{"rand": 0.1, **image_data}):
self.assertTrue(np.all(image_data["image"] == out["image"]))
self.assertEqual(out["image"].dtype, dtype)
for out in self.tfrun(pp.get_det_fliplr(randkey="rand"),
{"rand": 0.6, **image_data}):
self.assertTrue(np.all(image_data["image"][:, ::-1, :] == out["image"]))
self.assertEqual(out["image"].dtype, dtype)
def test_inception_box(self):
for out in self.tfrun(pp.get_inception_box(), get_image_data()):
self.assertEqual(out["box"][0].shape, (2,))
self.assertEqual(out["box"][1].shape, (2,))
def test_crop_box(self):
data = get_image_data()
data["box"] = (tf.constant([0.5, 0.4]), tf.constant([0.25, 0.3]))
for out in self.tfrun(pp.get_crop_box(), data):
self.assertEqual(out["image"].shape, (160, 96, 3))
self.assertAllEqual(
data["image"][320:320 + 160, 128:128 + 96],
out["image"])
def test_make_canonical(self):
orig = np.array([
[1, 0, 3, 3, -1],
[1, 0, 3, 3, -1],
[1, 0, 2, 2, 2],
[1, 0, 0, -1, -1]
], np.int32)[:, :, None]
expected = np.array([
[2, 0, 1, 1, -1],
[2, 0, 1, 1, -1],
[2, 0, 3, 3, 3],
[2, 0, 0, -1, -1]
], np.int32)[:, :, None]
for out in self.tfrun(pp.get_make_canonical(), {"labels": orig}):
self.assertTrue(np.all(out["labels"] == expected))
# Test it only affects last channel.
for out in self.tfrun(pp.get_make_canonical(),
{"labels": tf.tile(orig, (1, 1, 3))}):
self.assertAllEqual(out["labels"][..., 0], orig[..., 0])
self.assertAllEqual(out["labels"][..., 1], orig[..., 0])
self.assertAllEqual(out["labels"][..., 2], expected[..., 0])
def test_nyu_depth(self):
image = tf.zeros((5, 7, 3), dtype=tf.uint8)
depth = tf.zeros((5, 7), dtype=tf.float16)
data = {
"image": image,
"depth": depth
}
output = pp.get_nyu_depth()(data)
self.assertEqual(output["image"].shape, (5, 7, 3))
self.assertEqual(output["image"].dtype, tf.uint8)
self.assertEqual(output["labels"].shape, (5, 7, 1))
self.assertEqual(output["labels"].dtype, tf.float32)
def test_nyu_eval_crop(self):
image = tf.zeros((480, 640, 3), dtype=tf.uint8)
depth = tf.zeros((480, 640), dtype=tf.float16)
data = {
"image": image,
"depth": depth
}
data = pp.get_nyu_depth()(data)
output = pp.get_nyu_eval_crop()(data)
self.assertEqual(output["image"].shape, (426, 560, 3))
self.assertEqual(output["image"].dtype, tf.uint8)
self.assertEqual(output["labels"].shape, (426, 560, 1))
self.assertEqual(output["labels"].dtype, tf.float32)
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download and prepare TFDS datasets for the big_vision codebase.
This python script covers cifar10, cifar100, oxford_iiit_pet,
oxford_flowers102 and imagenet_v2.
If you want to integrate other public or custom datasets, please follow:
https://www.tensorflow.org/datasets/catalog/overview
"""
from absl import app
import tensorflow_datasets as tfds
def main(argv):
if len(argv) > 1 and "download_tfds_datasets.py" in argv[0]:
datasets = argv[1:]
else:
datasets = [
"cifar10",
"cifar100",
"oxford_iiit_pet",
"oxford_flowers102",
"imagenet_v2",
]
for d in datasets:
tfds.load(name=d, download=True)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that loads a model and only runs evaluators."""
from functools import partial
import importlib
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.utils as u
from clu import parameter_overview
import flax
import flax.jax_utils as flax_utils
import jax
import jax.config
import jax.numpy as jnp
from ml_collections import config_flags
import tensorflow as tf
from tensorflow.io import gfile
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info("Workdir: %s", workdir)
# Here we register preprocessing ops from modules listed on `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image"]):
importlib.import_module(f"big_vision.pp.{m}")
# These functions do more stuff internally, for OSS release we mock them by
# trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
def write_note(note):
if jax.process_index() == 0:
logging.info("NOTE: %s", note)
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
u.chrono.inform(measure=mw.measure, write_note=write_note)
write_note(f"Initializing {config.model_name} model...")
assert config.get("model.reinit") is None, (
"I don't think you want any part of the model to be re-initialized.")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model_kw = dict(config.get("model", {}))
if "num_classes" in config: # Make it work for regular + image_text.
model_kw["num_classes"] = config.num_classes
model = model_mod.Model(**model_kw)
# We want all parameters to be created in host RAM, not on any device, they'll
# be sent there later as needed, otherwise we already encountered two
# situations where we allocate them twice.
@partial(jax.jit, backend="cpu")
def init(rng):
input_shapes = config.get("init_shapes", [(1, 224, 224, 3)])
input_types = config.get("init_types", [jnp.float32] * len(input_shapes))
dummy_inputs = [jnp.zeros(s, t) for s, t in zip(input_shapes, input_types)]
things = flax.core.unfreeze(model.init(rng, *dummy_inputs))
return things.get("params", {})
with u.chrono.log_timing("z/secs/init"):
params_cpu = init(jax.random.PRNGKey(42))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
mw.measure("num_params", num_params)
# The use-case for not loading an init is testing and debugging.
if config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.get("model"),
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(params_cpu, msg="loaded params")
write_note("Replicating...")
params_repl = flax_utils.replicate(params_cpu)
def predict_fn(params, *a, **kw):
return model.apply({"params": params}, *a, **kw)
evaluators = eval_common.from_config(
config, {"predict": predict_fn, "model": model},
lambda s: write_note(f"Initializing evaluator: {s}..."),
lambda key, cfg: 1, # Ignore log_steps, always run.
)
# Running for multiple steps can be useful in a couple of cases:
# 1. non-deterministic evaluators
# 2. warmup when timing evaluators (eg compile cache etc).
for s in range(config.get("eval_repeats", 1)):
mw.step_start(s)
for (name, evaluator, _, prefix) in evaluators:
write_note(f"{name} evaluation step {s}...")
with u.profile(name, noop=name in config.get("no_profile", [])):
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.sync() # sync barrier to get correct measurements
u.chrono.flush_timings()
mw.step_end()
write_note("Done!")
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
if workdir and flags.FLAGS.cleanup and jax.process_index() == 0:
gfile.rmtree(workdir)
try: # Only need this on the last work-unit, if already empty.
gfile.remove(os.path.join(workdir, ".."))
except tf.errors.OpError:
pass
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data functions, dispatch calls to the requested dataset."""
import importlib
# Note: intentionally not using ABC to avoid forcing implementation of every
# method, since one can imagine train-only datasets for example.
class DataSource:
"""The API that any data source should implement."""
def get_tfdata(self, ordered):
"""Creates this data object as a tf.data.Dataset.
This will be called separately in each process, and it is up to the dataset
implementation to shard it accordingly if desired!
Args:
ordered: if True, the dataset should use deterministic ordering, if False
it may have undefined ordering. Think of True == val, False == train.
Returns:
A tf.data.Dataset object.
Raises:
RuntimeError: if not implemented by the dataset, but called.
"""
raise RuntimeError("not implemented for {self.__class__.__name__}")
@property
def total_examples(self):
"""Returns number of examples in the dataset, regardless of sharding."""
raise RuntimeError("not implemented for {self.__class__.__name__}")
def num_examples_per_process(self, nprocess=None):
"""Returns a list of the numer of examples for each process.
This is only needed for datasets that should go through make_for_inference.
Args:
nprocess: the number of processes, use `jax.process_count()` if None.
Returns:
Returns a list of the number of examples for each process.
Ideally, this would always be `[total() / nprocess] * nprocess`, but in
reality we can almost never perfectly shard a dataset across an arbitrary
number of processes.
One alternative option that can work in some cases is to not even shard
the dataset and thus return `[num_examples()] * nprocess`.
Raises:
RuntimeError: if not implemented by the dataset, but called.
"""
raise RuntimeError("not implemented for {self.__class__.__name__}")
def get(name, **kw):
if name.startswith("bv:"):
mod = importlib.import_module(f"big_vision.datasets.{name[3:]}")
return mod.DataSource(**kw)
else:
mod = importlib.import_module("big_vision.datasets.tfds")
return mod.DataSource(name, **kw)
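# Example (a sketch; "my_data" is a hypothetical custom dataset module):
#   ds = get("imagenet2012", split="train")  # dispatches to datasets.tfds
#   ds = get("bv:my_data", split="train")    # imports big_vision.datasets.my_data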
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Datasets as data source for big_vision."""
import functools
import big_vision.datasets.core as ds_core
import jax
import overrides
import tensorflow_datasets as tfds
class DataSource(ds_core.DataSource):
"""Use TFDS as a data source."""
def __init__(self, name, split, data_dir=None, skip_decode=("image",)):
self.builder = _get_builder(name, data_dir)
self.split = split
# Each host is responsible for a fixed subset of data
process_splits = tfds.even_splits(split, jax.process_count())
self.process_split = process_splits[jax.process_index()]
self.skip_decoders = {
f: tfds.decode.SkipDecoding()
for f in skip_decode
if f in self.builder.info.features
}
@overrides.overrides
def get_tfdata(self, ordered=False):
return self.builder.as_dataset(
split=self.process_split,
shuffle_files=not ordered,
read_config=tfds.ReadConfig(
skip_prefetch=True, # We prefetch after pipeline.
try_autocache=False, # We control this, esp. for few-shot.
add_tfds_id=True,
),
decoders=self.skip_decoders)
@property
@overrides.overrides
def total_examples(self):
return self.builder.info.splits[self.split].num_examples
@overrides.overrides
def num_examples_per_process(self, nprocess=None):
splits = tfds.even_splits(self.split, nprocess or jax.process_count())
return [self.builder.info.splits[s].num_examples for s in splits]
@functools.lru_cache(maxsize=None)
def _get_builder(dataset, data_dir):
return tfds.builder(dataset, data_dir=data_dir, try_gcs=True)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imagenet class names."""
# Copied from
# https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
CLIP_IMAGENET_CLASS_NAMES = [
'tench', 'goldfish', 'great white shark', 'tiger shark', 'hammerhead shark',
'electric ray', 'stingray', 'rooster', 'hen', 'ostrich', 'brambling',
'goldfinch', 'house finch', 'junco', 'indigo bunting', 'American robin',
'bulbul', 'jay', 'magpie', 'chickadee', 'American dipper',
'kite (bird of prey)', 'bald eagle', 'vulture', 'great grey owl',
'fire salamander', 'smooth newt', 'newt', 'spotted salamander', 'axolotl',
'American bullfrog', 'tree frog', 'tailed frog', 'loggerhead sea turtle',
'leatherback sea turtle', 'mud turtle', 'terrapin', 'box turtle',
'banded gecko', 'green iguana', 'Carolina anole',
'desert grassland whiptail lizard', 'agama', 'frilled-necked lizard',
'alligator lizard', 'Gila monster', 'European green lizard', 'chameleon',
'Komodo dragon', 'Nile crocodile', 'American alligator', 'triceratops',
'worm snake', 'ring-necked snake', 'eastern hog-nosed snake',
'smooth green snake', 'kingsnake', 'garter snake', 'water snake',
'vine snake', 'night snake', 'boa constrictor', 'African rock python',
'Indian cobra', 'green mamba', 'sea snake', 'Saharan horned viper',
'eastern diamondback rattlesnake', 'sidewinder rattlesnake', 'trilobite',
'harvestman', 'scorpion', 'yellow garden spider', 'barn spider',
'European garden spider', 'southern black widow', 'tarantula',
'wolf spider', 'tick', 'centipede', 'black grouse', 'ptarmigan',
'ruffed grouse', 'prairie grouse', 'peafowl', 'quail', 'partridge',
'african grey parrot', 'macaw', 'sulphur-crested cockatoo', 'lorikeet',
'coucal', 'bee eater', 'hornbill', 'hummingbird', 'jacamar', 'toucan',
'duck', 'red-breasted merganser', 'goose', 'black swan', 'tusker',
'echidna', 'platypus', 'wallaby', 'koala', 'wombat', 'jellyfish',
'sea anemone', 'brain coral', 'flatworm', 'nematode', 'conch', 'snail',
'slug', 'sea slug', 'chiton', 'chambered nautilus', 'Dungeness crab',
'rock crab', 'fiddler crab', 'red king crab', 'American lobster',
'spiny lobster', 'crayfish', 'hermit crab', 'isopod', 'white stork',
'black stork', 'spoonbill', 'flamingo', 'little blue heron', 'great egret',
'bittern bird', 'crane bird', 'limpkin', 'common gallinule',
'American coot', 'bustard', 'ruddy turnstone', 'dunlin', 'common redshank',
'dowitcher', 'oystercatcher', 'pelican', 'king penguin', 'albatross',
'grey whale', 'killer whale', 'dugong', 'sea lion', 'Chihuahua',
'Japanese Chin', 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel',
'Papillon', 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound',
'Basset Hound', 'Beagle', 'Bloodhound', 'Bluetick Coonhound',
'Black and Tan Coonhound', 'Treeing Walker Coonhound', 'English foxhound',
'Redbone Coonhound', 'borzoi', 'Irish Wolfhound', 'Italian Greyhound',
'Whippet', 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki',
'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier',
'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier',
'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', 'Norwich Terrier',
'Yorkshire Terrier', 'Wire Fox Terrier', 'Lakeland Terrier',
'Sealyham Terrier', 'Airedale Terrier', 'Cairn Terrier',
'Australian Terrier', 'Dandie Dinmont Terrier', 'Boston Terrier',
'Miniature Schnauzer', 'Giant Schnauzer', 'Standard Schnauzer',
'Scottish Terrier', 'Tibetan Terrier', 'Australian Silky Terrier',
'Soft-coated Wheaten Terrier', 'West Highland White Terrier', 'Lhasa Apso',
'Flat-Coated Retriever', 'Curly-coated Retriever', 'Golden Retriever',
'Labrador Retriever', 'Chesapeake Bay Retriever',
'German Shorthaired Pointer', 'Vizsla', 'English Setter', 'Irish Setter',
'Gordon Setter', 'Brittany dog', 'Clumber Spaniel',
'English Springer Spaniel', 'Welsh Springer Spaniel', 'Cocker Spaniel',
'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz', 'Schipperke',
'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie', 'Komondor',
'Old English Sheepdog', 'Shetland Sheepdog', 'collie', 'Border Collie',
'Bouvier des Flandres dog', 'Rottweiler', 'German Shepherd Dog',
'Dobermann', 'Miniature Pinscher', 'Greater Swiss Mountain Dog',
'Bernese Mountain Dog', 'Appenzeller Sennenhund', 'Entlebucher Sennenhund',
'Boxer', 'Bullmastiff', 'Tibetan Mastiff', 'French Bulldog', 'Great Dane',
'St. Bernard', 'husky', 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian',
'Affenpinscher', 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog',
'Great Pyrenees dog', 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond',
'brussels griffon', 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi',
'Toy Poodle', 'Miniature Poodle', 'Standard Poodle',
'Mexican hairless dog (xoloitzcuintli)', 'grey wolf', 'Alaskan tundra wolf',
'red wolf or maned wolf', 'coyote', 'dingo', 'dhole', 'African wild dog',
'hyena', 'red fox', 'kit fox', 'Arctic fox', 'grey fox', 'tabby cat',
'tiger cat', 'Persian cat', 'Siamese cat', 'Egyptian Mau', 'cougar', 'lynx',
'leopard', 'snow leopard', 'jaguar', 'lion', 'tiger', 'cheetah',
'brown bear', 'American black bear', 'polar bear', 'sloth bear', 'mongoose',
'meerkat', 'tiger beetle', 'ladybug', 'ground beetle', 'longhorn beetle',
'leaf beetle', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee',
'ant', 'grasshopper', 'cricket insect', 'stick insect', 'cockroach',
'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly',
'damselfly', 'red admiral butterfly', 'ringlet butterfly',
'monarch butterfly', 'small white butterfly', 'sulphur butterfly',
'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber',
'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine',
'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse',
'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox',
'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep',
'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle',
'arabian camel', 'llama', 'weasel', 'mink', 'European polecat',
'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo',
'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon',
'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur',
'black-and-white colobus', 'proboscis monkey', 'marmoset',
'white-headed capuchin', 'howler monkey', 'titi monkey',
'Geoffroy\'s spider monkey', 'common squirrel monkey', 'ring-tailed lemur',
'indri', 'Asian elephant', 'African bush elephant', 'red panda',
'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish',
'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus',
'abaya', 'academic gown', 'accordion', 'acoustic guitar',
'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance',
'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can',
'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon',
'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell',
'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow',
'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap',
'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker',
'military hat (bearskin or shako)', 'beer bottle', 'beer glass',
'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder',
'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie',
'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow',
'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate',
'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train',
'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe',
'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit',
'cardboard box / carton', 'car wheel', 'automated teller machine',
'cassette', 'cassette player', 'castle', 'catamaran', 'CD player', 'cello',
'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw',
'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet',
'Christmas stocking', 'church', 'movie theater', 'cleaver',
'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug',
'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard',
'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet',
'cowboy boot', 'cowboy hat', 'cradle', 'construction crane', 'crash helmet',
'crate', 'infant bed', 'Crock Pot', 'croquet ball', 'crutch', 'cuirass',
'dam', 'desk', 'desktop computer', 'rotary dial telephone', 'diaper',
'digital clock', 'digital watch', 'dining table', 'dishcloth', 'dishwasher',
'disc brake', 'dock', 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum',
'drumstick', 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar',
'electric locomotive', 'entertainment center', 'envelope',
'espresso machine', 'face powder', 'feather boa', 'filing cabinet',
'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute',
'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen',
'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat',
'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart',
'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano',
'greenhouse', 'radiator grille', 'grocery store', 'guillotine', 'hair clip',
'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer',
'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica',
'harp', 'combine harvester', 'hatchet', 'holster', 'home theater',
'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar',
'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron',
'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw',
'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade',
'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library',
'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick',
'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass',
'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights',
'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask',
'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet',
'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can',
'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl',
'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped',
'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa',
'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van',
'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier',
'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer', 'oil filter',
'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart', 'oxygen mask',
'product packet / packaging', 'paddle', 'paddle wheel', 'padlock',
'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel', 'parachute',
'parallel bars', 'park bench', 'parking meter', 'railroad car', 'patio',
'payphone', 'pedestal', 'pencil case', 'pencil sharpener', 'perfume',
'Petri dish', 'photocopier', 'plectrum', 'Pickelhaube', 'picket fence',
'pickup truck', 'pier', 'piggy bank', 'pill bottle', 'pillow',
'ping-pong ball', 'pinwheel', 'pirate ship', 'drink pitcher', 'block plane',
'planetarium', 'plastic bag', 'plate rack', 'farm plow', 'plunger',
'Polaroid camera', 'pole', 'police van', 'poncho', 'pool table',
'soda bottle', 'plant pot', 'potter\'s wheel', 'power drill', 'prayer rug',
'printer', 'prison', 'missile', 'projector', 'hockey puck', 'punching bag',
'purse', 'quill', 'quilt', 'race car', 'racket', 'radiator', 'radio',
'radio telescope', 'rain barrel', 'recreational vehicle',
'fishing casting reel', 'reflex camera', 'refrigerator', 'remote control',
'restaurant', 'revolver', 'rifle', 'rocking chair', 'rotisserie', 'eraser',
'rugby ball', 'ruler measuring stick', 'sneaker', 'safe', 'safety pin',
'salt shaker', 'sandal', 'sarong', 'saxophone', 'scabbard',
'weighing scale', 'school bus', 'schooner', 'scoreboard', 'CRT monitor',
'screw', 'screwdriver', 'seat belt', 'sewing machine', 'shield',
'shoe store', 'shoji screen / room divider', 'shopping basket',
'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski',
'balaclava ski mask', 'sleeping bag', 'slide rule', 'sliding door',
'slot machine', 'snorkel', 'snowmobile', 'snowplow', 'soap dispenser',
'soccer ball', 'sock', 'solar thermal collector', 'sombrero', 'soup bowl',
'keyboard space bar', 'space heater', 'space shuttle', 'spatula',
'motorboat', 'spider web', 'spindle', 'sports car', 'spotlight', 'stage',
'steam locomotive', 'through arch bridge', 'steel drum', 'stethoscope',
'scarf', 'stone wall', 'stopwatch', 'stove', 'strainer', 'tram',
'stretcher', 'couch', 'stupa', 'submarine', 'suit', 'sundial', 'sunglasses',
'sunglasses', 'sunscreen', 'suspension bridge', 'mop', 'sweatshirt',
'swim trunks / shorts', 'swing', 'electrical switch', 'syringe',
'table lamp', 'tank', 'tape player', 'teapot', 'teddy bear', 'television',
'tennis ball', 'thatched roof', 'front curtain', 'thimble',
'threshing machine', 'throne', 'tile roof', 'toaster', 'tobacco shop',
'toilet seat', 'torch', 'totem pole', 'tow truck', 'toy store', 'tractor',
'semi-trailer truck', 'tray', 'trench coat', 'tricycle', 'trimaran',
'tripod', 'triumphal arch', 'trolleybus', 'trombone', 'hot tub',
'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle', 'upright piano',
'vacuum cleaner', 'vase', 'vaulted or arched ceiling', 'velvet fabric',
'vending machine', 'vestment', 'viaduct', 'violin', 'volleyball',
'waffle iron', 'wall clock', 'wallet', 'wardrobe', 'military aircraft',
'sink', 'washing machine', 'water bottle', 'water jug', 'water tower',
'whiskey jug', 'whistle', 'hair wig', 'window screen', 'window shade',
'Windsor tie', 'wine bottle', 'airplane wing', 'wok', 'wooden spoon',
'wool', 'split-rail fence', 'shipwreck', 'sailboat', 'yurt', 'website',
'comic book', 'crossword', 'traffic or street sign', 'traffic light',
'dust jacket', 'menu', 'plate', 'guacamole', 'consomme', 'hot pot',
'trifle', 'ice cream', 'popsicle', 'baguette', 'bagel', 'pretzel',
'cheeseburger', 'hot dog', 'mashed potatoes', 'cabbage', 'broccoli',
'cauliflower', 'zucchini', 'spaghetti squash', 'acorn squash',
'butternut squash', 'cucumber', 'artichoke', 'bell pepper', 'cardoon',
'mushroom', 'Granny Smith apple', 'strawberry', 'orange', 'lemon', 'fig',
'pineapple', 'banana', 'jackfruit', 'cherimoya (custard apple)',
'pomegranate', 'hay', 'carbonara', 'chocolate syrup', 'dough', 'meatloaf',
'pizza', 'pot pie', 'burrito', 'red wine', 'espresso', 'tea cup', 'eggnog',
'mountain', 'bubble', 'cliff', 'coral reef', 'geyser', 'lakeshore',
'promontory', 'sandbar', 'beach', 'valley', 'volcano', 'baseball player',
'bridegroom', 'scuba diver', 'rapeseed', 'daisy', 'yellow lady\'s slipper',
'corn', 'acorn', 'rose hip', 'horse chestnut seed', 'coral fungus',
'agaric', 'gyromitra', 'stinkhorn mushroom', 'earth star fungus',
'hen of the woods mushroom', 'bolete', 'corn cob', 'toilet paper'
]
# ImageNet-A and ImageNet-R do not use the full label space of ImageNet.
# These were copied from third_party/py/robustness_metrics/datasets/tfds.py
# Kudos to mjlm@ who helped us notice this.
IMAGENET_A_LABELSET = [
6, 11, 13, 15, 17, 22, 23, 27, 30, 37, 39, 42, 47, 50, 57, 70, 71, 76, 79,
89, 90, 94, 96, 97, 99, 105, 107, 108, 110, 113, 124, 125, 130, 132, 143,
144, 150, 151, 207, 234, 235, 254, 277, 283, 287, 291, 295, 298, 301, 306,
307, 308, 309, 310, 311, 313, 314, 315, 317, 319, 323, 324, 326, 327, 330,
334, 335, 336, 347, 361, 363, 372, 378, 386, 397, 400, 401, 402, 404, 407,
411, 416, 417, 420, 425, 428, 430, 437, 438, 445, 456, 457, 461, 462, 470,
472, 483, 486, 488, 492, 496, 514, 516, 528, 530, 539, 542, 543, 549, 552,
557, 561, 562, 569, 572, 573, 575, 579, 589, 606, 607, 609, 614, 626, 627,
640, 641, 642, 643, 658, 668, 677, 682, 684, 687, 701, 704, 719, 736, 746,
749, 752, 758, 763, 765, 768, 773, 774, 776, 779, 780, 786, 792, 797, 802,
803, 804, 813, 815, 820, 823, 831, 833, 835, 839, 845, 847, 850, 859, 862,
870, 879, 880, 888, 890, 897, 900, 907, 913, 924, 932, 933, 934, 937, 943,
945, 947, 951, 954, 956, 957, 959, 971, 972, 980, 981, 984, 986, 987, 988,
]
# Also check out https://github.com/hendrycks/imagenet-r/blob/master/eval.py
IMAGENET_R_LABELSET = [
1, 2, 4, 6, 8, 9, 11, 13, 22, 23, 26, 29, 31, 39, 47, 63, 71, 76, 79, 84,
90, 94, 96, 97, 99, 100, 105, 107, 113, 122, 125, 130, 132, 144, 145, 147,
148, 150, 151, 155, 160, 161, 162, 163, 171, 172, 178, 187, 195, 199, 203,
207, 208, 219, 231, 232, 234, 235, 242, 245, 247, 250, 251, 254, 259, 260,
263, 265, 267, 269, 276, 277, 281, 288, 289, 291, 292, 293, 296, 299, 301,
308, 309, 310, 311, 314, 315, 319, 323, 327, 330, 334, 335, 337, 338, 340,
341, 344, 347, 353, 355, 361, 362, 365, 366, 367, 368, 372, 388, 390, 393,
397, 401, 407, 413, 414, 425, 428, 430, 435, 437, 441, 447, 448, 457, 462,
463, 469, 470, 471, 472, 476, 483, 487, 515, 546, 555, 558, 570, 579, 583,
587, 593, 594, 596, 609, 613, 617, 621, 629, 637, 657, 658, 701, 717, 724,
763, 768, 774, 776, 779, 780, 787, 805, 812, 815, 820, 824, 833, 847, 852,
866, 875, 883, 889, 895, 907, 928, 931, 932, 933, 934, 936, 937, 943, 945,
947, 948, 949, 951, 953, 954, 957, 963, 965, 967, 980, 981, 983, 988,
]
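# Minimal usage sketch (illustrative; `restrict_logits` is a hypothetical
# helper, not part of the original file): to score a regular 1000-class
# ImageNet model on ImageNet-A or ImageNet-R, keep only the logit columns
# listed in the corresponding labelset before taking the argmax.
def restrict_logits(logits, labelset):
  import numpy as np  # Kept local so the module's imports stay untouched.
  # [batch, 1000] -> [batch, len(labelset)]; argmax now indexes the subset.
  return np.asarray(logits)[:, labelset]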
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiT models as in the paper (ResNet V2) w/ loading of public weights.
See reproduction proof: http://(internal link)
"""
import functools
import re
from typing import Optional, Sequence, Union
from big_vision import utils as u
from big_vision.models import bit
from big_vision.models import common
import flax.linen as nn
import jax.numpy as jnp
def standardize(x, axis, eps):
x = x - jnp.mean(x, axis=axis, keepdims=True)
x = x / jnp.sqrt(jnp.mean(jnp.square(x), axis=axis, keepdims=True) + eps)
return x
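# Minimal usage sketch (illustrative, not original code): `standardize` makes
# the chosen axes approximately zero-mean and unit-variance; it is the shared
# building block of the GroupNorm and weight-standardized convs below.
def _standardize_demo():  # Hypothetical helper, for illustration only.
  x = standardize(jnp.arange(8.0).reshape(2, 4), axis=[1], eps=1e-5)
  assert jnp.allclose(jnp.mean(x, axis=1), 0.0, atol=1e-4)
  return x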
# Defined our own, because we compute normalizing variance slightly differently,
# which does affect performance when loading pre-trained weights!
class GroupNorm(nn.Module):
"""Group normalization (arxiv.org/abs/1803.08494)."""
ngroups: int = 32
@nn.compact
def __call__(self, x):
input_shape = x.shape
group_shape = x.shape[:-1] + (self.ngroups, x.shape[-1] // self.ngroups)
x = x.reshape(group_shape)
# Standardize along spatial and group dimensions
x = standardize(x, axis=[1, 2, 4], eps=1e-5)
x = x.reshape(input_shape)
bias_scale_shape = tuple([1, 1, 1] + [input_shape[-1]])
x = x * self.param('scale', nn.initializers.ones, bias_scale_shape)
x = x + self.param('bias', nn.initializers.zeros, bias_scale_shape)
return x
class StdConv(nn.Conv):
def param(self, name, *a, **kw):
param = super().param(name, *a, **kw)
if name == 'kernel':
param = standardize(param, axis=[0, 1, 2], eps=1e-10)
return param
class RootBlock(nn.Module):
"""Root block of ResNet."""
width: int
@nn.compact
def __call__(self, x):
x = StdConv(self.width, (7, 7), (2, 2), padding=[(3, 3), (3, 3)],
use_bias=False, name='conv_root')(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])
return x
class ResidualUnit(nn.Module):
"""Bottleneck ResNet block."""
nmid: Optional[int] = None
strides: Sequence[int] = (1, 1)
@nn.compact
def __call__(self, x):
nmid = self.nmid or x.shape[-1] // 4
nout = nmid * 4
conv = functools.partial(StdConv, use_bias=False)
residual = x
x = GroupNorm(name='gn1')(x)
x = nn.relu(x)
if x.shape[-1] != nout or self.strides != (1, 1):
residual = conv(nout, (1, 1), self.strides, name='conv_proj')(x)
x = conv(nmid, (1, 1), name='conv1')(x)
x = GroupNorm(name='gn2')(x)
x = nn.relu(x)
x = conv(nmid, (3, 3), self.strides, padding=[(1, 1), (1, 1)],
name='conv2')(x)
x = GroupNorm(name='gn3')(x)
x = nn.relu(x)
x = conv(nout, (1, 1), name='conv3')(x)
return x + residual
class ResNetStage(nn.Module):
"""A stage (sequence of same-resolution blocks)."""
block_size: int
nmid: Optional[int] = None
first_stride: Sequence[int] = (1, 1)
@nn.compact
def __call__(self, x):
out = {}
x = out['unit01'] = ResidualUnit(
self.nmid, strides=self.first_stride, name='unit01')(x)
for i in range(1, self.block_size):
x = out[f'unit{i+1:02d}'] = ResidualUnit(
self.nmid, name=f'unit{i+1:02d}')(x)
return x, out
class Model(nn.Module):
"""ResNetV2."""
num_classes: Optional[int] = None
width: int = 1
  depth: Union[int, Sequence[int]] = 50  # 50/101/152, or list of block depths.
@nn.compact
def __call__(self, image, *, train=False):
blocks = bit.get_block_desc(self.depth)
width = int(64 * self.width)
out = {}
x = out['stem'] = RootBlock(width=width, name='root_block')(image)
# Blocks
x, out['stage1'] = ResNetStage(blocks[0], nmid=width, name='block1')(x)
for i, block_size in enumerate(blocks[1:], 1):
x, out[f'stage{i + 1}'] = ResNetStage(
block_size, width * 2 ** i,
first_stride=(2, 2), name=f'block{i + 1}')(x)
# Pre-head
x = out['norm_pre_head'] = GroupNorm(name='norm-pre-head')(x)
x = out['pre_logits_2d'] = nn.relu(x)
x = out['pre_logits'] = jnp.mean(x, axis=(1, 2))
# Head
if self.num_classes:
head = nn.Dense(self.num_classes, name='head',
kernel_init=nn.initializers.zeros)
out['logits_2d'] = head(out['pre_logits_2d'])
x = out['logits'] = head(out['pre_logits'])
return x, out
def load(init_params, init_file, model_cfg, dont_load=()):
"""Loads the TF-dumped NumPy or big_vision checkpoint.
Args:
init_params: random init params from which the new head is taken.
init_file: comes from `config.model_init`, can either be an absolute
path (ie starts with /) to the checkpoint, or a string like
"L-imagenet2012" describing one of the variants from the paper.
model_cfg: the model configuration.
dont_load: list of param names to be reset to init.
Returns:
The loaded parameters.
"""
# Support for vanity model names from the paper.
vanity = {
'FunMatch-224px-i1k82.8': 'gs://bit_models/distill/R50x1_224.npz',
'FunMatch-160px-i1k80.5': 'gs://bit_models/distill/R50x1_160.npz',
}
if init_file[0] in ('L', 'M', 'S'): # The models from the original paper.
# Supported names are of the following type:
    # - 'L', 'M' or 'S': the original "upstream" model without fine-tuning.
# - 'M-ILSVRC2012': i21k model fine-tuned on i1k.
# - 'M-run0-caltech101': i21k model fine-tuned on VTAB's caltech101.
# each VTAB fine-tuning was run 3x, so there's run0, run1, run2.
if '-' in init_file:
up, down = init_file[0], init_file[1:]
else:
up, down = init_file, ''
down = {'-imagenet2012': '-ILSVRC2012'}.get(down, down) # normalize
fname = f'BiT-{up}-R{model_cfg.depth}x{model_cfg.width}{down}.npz'
fname = f'gs://bit_models/{fname}'
else:
fname = vanity.get(init_file, init_file)
params = u.load_params(None, fname)
params = maybe_convert_big_transfer_format(params)
return common.merge_params(params, init_params, dont_load)
def maybe_convert_big_transfer_format(params_tf):
"""If the checkpoint comes from legacy codebase, convert it."""
# Only do anything at all if we recognize the format.
if 'resnet' not in params_tf:
return params_tf
# For ease of processing and backwards compatibility, flatten again:
params_tf = dict(u.tree_flatten_with_names(params_tf)[0])
# Works around some files containing weird naming of variables:
for k in list(params_tf):
k2 = re.sub('/standardized_conv2d_\\d+/', '/standardized_conv2d/', k)
if k2 != k:
params_tf[k2] = params_tf[k]
del params_tf[k]
params = {
'root_block': {'conv_root': {'kernel': params_tf[
'resnet/root_block/standardized_conv2d/kernel']}},
'norm-pre-head': {
'bias': params_tf['resnet/group_norm/beta'][None, None, None],
'scale': params_tf['resnet/group_norm/gamma'][None, None, None],
},
'head': {
'kernel': params_tf['resnet/head/conv2d/kernel'][0, 0],
'bias': params_tf['resnet/head/conv2d/bias'],
}
}
for block in ('block1', 'block2', 'block3', 'block4'):
params[block] = {}
units = set([re.findall(r'unit\d+', p)[0] for p in params_tf.keys()
if p.find(block) >= 0])
for unit in units:
params[block][unit] = {}
for i, group in enumerate('abc', 1):
params[block][unit][f'conv{i}'] = {
'kernel': params_tf[f'resnet/{block}/{unit}/{group}/standardized_conv2d/kernel'] # pylint: disable=line-too-long
}
params[block][unit][f'gn{i}'] = {
'bias': params_tf[f'resnet/{block}/{unit}/{group}/group_norm/beta'][None, None, None], # pylint: disable=line-too-long
'scale': params_tf[f'resnet/{block}/{unit}/{group}/group_norm/gamma'][None, None, None], # pylint: disable=line-too-long
}
projs = [p for p in params_tf.keys()
if p.find(f'{block}/{unit}/a/proj') >= 0]
assert len(projs) <= 1
if projs:
params[block][unit]['conv_proj'] = {
'kernel': params_tf[projs[0]]
}
return params
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A refactored and simplified ViT.
However, the names of modules are made to match the old ones for easy loading.
"""
from typing import Optional, Sequence, Union
from absl import logging
from big_vision import utils
from big_vision.models import common
import flax
import flax.linen as nn
import flax.training.checkpoints
import jax.numpy as jnp
import numpy as np
import scipy.ndimage
def posemb_sincos_2d(h, w, width, temperature=10_000., dtype=jnp.float32):
"""Follows the MoCo v3 logic."""
y, x = jnp.mgrid[:h, :w]
assert width % 4 == 0, "Width must be mult of 4 for sincos posemb"
omega = jnp.arange(width // 4) / (width // 4 - 1)
omega = 1. / (temperature**omega)
y = jnp.einsum("m,d->md", y.flatten(), omega)
x = jnp.einsum("m,d->md", x.flatten(), omega)
pe = jnp.concatenate([jnp.sin(x), jnp.cos(x), jnp.sin(y), jnp.cos(y)], axis=1)
return jnp.asarray(pe, dtype)[None, :, :]
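# Minimal shape sketch (illustrative, not original code): the returned table
# has shape (1, h*w, width), so it broadcasts against (n, h*w, width) tokens.
def _posemb_sincos_2d_demo():  # Hypothetical helper, for illustration only.
  pe = posemb_sincos_2d(h=14, w=14, width=768)
  assert pe.shape == (1, 14 * 14, 768)
  return pe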
def get_posemb(self, typ, seqshape, width, name, dtype=jnp.float32):
if typ == "learn":
return self.param(name, nn.initializers.normal(stddev=1/np.sqrt(width)),
(1, np.prod(seqshape), width), dtype)
elif typ == "sincos2d":
return posemb_sincos_2d(*seqshape, width, dtype=dtype)
else:
raise ValueError(f"Unknown posemb type: {typ}")
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block."""
mlp_dim: Optional[int] = None # Defaults to 4x input dim
dropout: float = 0.0
@nn.compact
def __call__(self, x, deterministic=True):
"""Applies Transformer MlpBlock module."""
inits = dict(
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
)
n, l, d = x.shape # pylint: disable=unused-variable
x = nn.Dense(self.mlp_dim or 4 * d, **inits)(x)
x = nn.gelu(x)
x = nn.Dropout(rate=self.dropout)(x, deterministic)
x = nn.Dense(d, **inits)(x)
return x
class Encoder1DBlock(nn.Module):
"""Single transformer encoder block (MHSA + MLP)."""
mlp_dim: Optional[int] = None # Defaults to 4x input dim
num_heads: int = 12
dropout: float = 0.0
@nn.compact
def __call__(self, x, deterministic=True):
out = {}
y = nn.LayerNorm()(x)
y = out["sa"] = nn.MultiHeadDotProductAttention(
num_heads=self.num_heads,
kernel_init=nn.initializers.xavier_uniform(),
deterministic=deterministic,
)(y, y)
y = nn.Dropout(rate=self.dropout)(y, deterministic)
x = out["+sa"] = x + y
y = nn.LayerNorm()(x)
y = out["mlp"] = MlpBlock(
mlp_dim=self.mlp_dim, dropout=self.dropout,
)(y, deterministic)
y = nn.Dropout(rate=self.dropout)(y, deterministic)
x = out["+mlp"] = x + y
return x, out
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation."""
depth: int
mlp_dim: Optional[int] = None # Defaults to 4x input dim
num_heads: int = 12
dropout: float = 0.0
@nn.compact
def __call__(self, x, deterministic=True):
out = {}
# Input Encoder
for lyr in range(self.depth):
block = Encoder1DBlock(
name=f"encoderblock_{lyr}",
mlp_dim=self.mlp_dim, num_heads=self.num_heads, dropout=self.dropout)
x, out[f"block{lyr:02d}"] = block(x, deterministic)
out["pre_ln"] = x # Alias for last block, but without the number in it.
return nn.LayerNorm(name="encoder_norm")(x), out
class MAPHead(nn.Module):
"""Multihead Attention Pooling."""
mlp_dim: Optional[int] = None # Defaults to 4x input dim
num_heads: int = 12
@nn.compact
def __call__(self, x):
# TODO
n, l, d = x.shape # pylint: disable=unused-variable
probe = self.param("probe", nn.initializers.xavier_uniform(),
(1, 1, d), x.dtype)
probe = jnp.tile(probe, [n, 1, 1])
x = nn.MultiHeadDotProductAttention(
num_heads=self.num_heads,
kernel_init=nn.initializers.xavier_uniform())(probe, x)
# TODO: dropout on head?
y = nn.LayerNorm()(x)
x = x + MlpBlock(mlp_dim=self.mlp_dim)(y)
return x[:, 0]
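# Minimal usage sketch (illustrative, not original code): MAP pooling reduces
# a (n, l, d) token sequence to one (n, d) vector per example via a learned
# probe that attends over all tokens.
def _map_head_demo():  # Hypothetical helper, for illustration only.
  import jax  # Only jax.numpy is imported at module level.
  x = jnp.ones((2, 7, 32))
  y, _ = MAPHead(num_heads=4).init_with_output(jax.random.PRNGKey(0), x)
  assert y.shape == (2, 32)
  return y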
class _Model(nn.Module):
"""ViT model."""
num_classes: Optional[int] = None
patch_size: Sequence[int] = (16, 16)
width: int = 768
depth: int = 12
mlp_dim: Optional[int] = None # Defaults to 4x input dim
num_heads: int = 12
posemb: str = "learn" # Can also be "sincos2d"
rep_size: Union[int, bool] = False
dropout: float = 0.0
pool_type: str = "gap" # Can also be "map" or "tok"
head_zeroinit: bool = True
@nn.compact
def __call__(self, image, *, train=False):
out = {}
# Patch extraction
x = out["stem"] = nn.Conv(
self.width, self.patch_size, strides=self.patch_size,
padding="VALID", name="embedding")(image)
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
# Add posemb before adding extra token.
x = out["with_posemb"] = x + get_posemb(
self, self.posemb, (h, w), c, "pos_embedding", x.dtype)
if self.pool_type == "tok":
cls = self.param("cls", nn.initializers.zeros, (1, 1, c), x.dtype)
x = jnp.concatenate([jnp.tile(cls, [n, 1, 1]), x], axis=1)
n, l, c = x.shape # pylint: disable=unused-variable
x = nn.Dropout(rate=self.dropout)(x, not train)
x, out["encoder"] = Encoder(
depth=self.depth,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout=self.dropout,
name="Transformer")(
x, deterministic=not train)
encoded = out["encoded"] = x
if self.pool_type == "map":
x = out["head_input"] = MAPHead(
num_heads=self.num_heads, mlp_dim=self.mlp_dim)(x)
elif self.pool_type == "gap":
x = out["head_input"] = jnp.mean(x, axis=1)
elif self.pool_type == "0":
x = out["head_input"] = x[:, 0]
elif self.pool_type == "tok":
x = out["head_input"] = x[:, 0]
encoded = encoded[:, 1:]
else:
raise ValueError(f"Unknown pool type: '{self.pool_type}'")
x_2d = jnp.reshape(encoded, [n, h, w, -1])
if self.rep_size:
rep_size = self.width if self.rep_size is True else self.rep_size
hid = nn.Dense(rep_size, name="pre_logits")
# NOTE: In the past we did not include tanh in pre_logits.
      # For few-shot, it should not matter much, as it whitens anyway.
x_2d = nn.tanh(hid(x_2d))
x = nn.tanh(hid(x))
out["pre_logits_2d"] = x_2d
out["pre_logits"] = x
if self.num_classes:
kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {}
head = nn.Dense(self.num_classes, name="head", **kw)
x_2d = out["logits_2d"] = head(x_2d)
x = out["logits"] = head(x)
return x, out
def Model(num_classes=None, *, variant=None, **kw): # pylint: disable=invalid-name
"""Factory function, because linen really don't like what I'm doing!"""
return _Model(num_classes, **{**decode_variant(variant), **kw})
def decode_variant(variant):
"""Converts a string like "B" or "B/32" into a params dict."""
if variant is None:
return {}
v, patch = variant, {}
if "/" in variant:
v, patch = variant.split("/")
patch = {"patch_size": (int(patch), int(patch))}
return {
# pylint:disable=line-too-long
# Reference: Table 2 of https://arxiv.org/abs/2106.04560.
"width": {"Ti": 192, "S": 384, "M": 512, "B": 768, "L": 1024, "H": 1280, "g": 1408, "G": 1664, "e": 1792}[v],
"depth": {"Ti": 12, "S": 12, "M": 12, "B": 12, "L": 24, "H": 32, "g": 40, "G": 48, "e": 56}[v],
"mlp_dim": {"Ti": 768, "S": 1536, "M": 2048, "B": 3072, "L": 4096, "H": 5120, "g": 6144, "G": 8192, "e": 15360}[v],
"num_heads": {"Ti": 3, "S": 6, "M": 8, "B": 12, "L": 16, "H": 16, "g": 16, "G": 16, "e": 16}[v],
# pylint:enable=line-too-long
**patch
}
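# Minimal usage sketch (illustrative, not original code): "B/16" decodes to
# the ViT-Base row of the table plus a 16x16 patch size.
def _decode_variant_demo():  # Hypothetical helper, for illustration only.
  cfg = decode_variant("B/16")
  assert cfg["width"] == 768 and cfg["depth"] == 12
  assert cfg["patch_size"] == (16, 16)
  return cfg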
def resample_posemb(old, new):
"""This function implements "high-res finetuning" for transformer models."""
# Rescale the grid of position embeddings. Param shape is (1,N,1024)
if old.shape == new.shape:
return old
logging.info("ViT: resize %s to %s", old.shape, new.shape)
gs_old = int(np.sqrt(old.shape[1]))
gs_new = int(np.sqrt(new.shape[1]))
logging.info("ViT: grid-size from %s to %s", gs_old, gs_new)
grid = old.reshape(gs_old, gs_old, -1)
zoom = (gs_new/gs_old, gs_new/gs_old, 1)
grid = scipy.ndimage.zoom(grid, zoom, order=1)
grid = grid.reshape(1, gs_new*gs_new, -1)
return jnp.array(grid)
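# Minimal usage sketch (illustrative, not original code): fine-tuning a /16
# model at 384px instead of 224px grows the posemb grid from 14x14 to 24x24.
def _resample_posemb_demo():  # Hypothetical helper, for illustration only.
  old = np.zeros((1, 14 * 14, 768), np.float32)
  new = np.zeros((1, 24 * 24, 768), np.float32)
  assert resample_posemb(old, new).shape == new.shape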
def fix_old_checkpoints(params):
"""Fix small bwd incompat that can't be resolved with names in model def."""
params = flax.core.unfreeze(
flax.training.checkpoints.convert_pre_linen(params))
# Original ViT paper variant had posemb in a module:
if "posembed_input" in params["Transformer"]:
logging.info("ViT: Loading and fixing VERY old posemb")
posemb = params["Transformer"].pop("posembed_input")
params["pos_embedding"] = posemb["pos_embedding"]
# Widely used version before 2022 had posemb in Encoder:
if "pos_embedding" in params["Transformer"]:
logging.info("ViT: Loading and fixing old posemb")
params["pos_embedding"] = params["Transformer"].pop("pos_embedding")
# Old vit.py used to first concat [cls] token, then add posemb.
# This means a B/32@224px would have 7x7+1 posembs. This is useless and clumsy
# so we changed to add posemb then concat [cls]. We can recover the old
# checkpoint by manually summing [cls] token and its posemb entry.
if "pos_embedding" in params:
pe = params["pos_embedding"]
if int(np.sqrt(pe.shape[1])) ** 2 + 1 == int(pe.shape[1]):
logging.info("ViT: Loading and fixing combined cls+posemb")
pe_cls, params["pos_embedding"] = pe[:, :1], pe[:, 1:]
if "cls" in params:
params["cls"] += pe_cls
# MAP-head variants during ViT-G development had it inlined:
if "probe" in params:
params["MAPHead_0"] = {
k: params.pop(k) for k in
["probe", "MlpBlock_0", "MultiHeadDotProductAttention_0", "LayerNorm_0"]
}
return params
def load(init_params, init_file, model_cfg, dont_load=()): # pylint: disable=invalid-name because we had to CamelCase above.
"""Load init from checkpoint, both old model and this one. +Hi-res posemb."""
del model_cfg
init_file = VANITY_NAMES.get(init_file, init_file)
restored_params = utils.load_params(None, init_file)
restored_params = fix_old_checkpoints(restored_params)
# possibly use the random init for some of the params (such as, the head).
restored_params = common.merge_params(restored_params, init_params, dont_load)
# resample posemb if needed.
if init_params and "pos_embedding" in init_params:
restored_params["pos_embedding"] = resample_posemb(
old=restored_params["pos_embedding"],
new=init_params["pos_embedding"])
return restored_params
# Shortcut names for some canonical paper checkpoints:
VANITY_NAMES = {
    # pylint: disable=line-too-long
# Recommended models from https://arxiv.org/abs/2106.10270
# Many more models at https://github.com/google-research/vision_transformer
"howto-i21k-Ti/16": "gs://vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz",
"howto-i21k-S/32": "gs://vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_none-wd_0.1-do_0.0-sd_0.0.npz",
"howto-i21k-S/16": "gs://vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz",
"howto-i21k-B/32": "gs://vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0.npz",
"howto-i21k-B/16": "gs://vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz",
"howto-i21k-B/8": "gs://vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz",
"howto-i21k-L/16": "gs://vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0.npz",
# Better plain vit-s16 baselines from https://arxiv.org/abs/2205.01580
"i1k-s16-90ep": "gs://big_vision/vit_s16_i1k_90ep.npz",
"i1k-s16-150ep": "gs://big_vision/vit_s16_i1k_150ep.npz",
"i1k-s16-300ep": "gs://big_vision/vit_s16_i1k_300ep.npz",
# DeiT-3 checkpoints from https://github.com/facebookresearch/deit/blob/main/README_revenge.md
# First layer converted to take inputs in [-1,1]
"deit3_S_224_1k": "gs://big_vision/zoo/deit3/bv_deit_3_small_224_1k.npz",
"deit3_S_224_21k": "gs://big_vision/zoo/deit3/bv_deit_3_small_224_21k.npz",
"deit3_S_384_1k": "gs://big_vision/zoo/deit3/bv_deit_3_small_384_1k.npz",
"deit3_S_384_21k": "gs://big_vision/zoo/deit3/bv_deit_3_small_384_21k.npz",
"deit3_B_224_1k": "gs://big_vision/zoo/deit3/bv_deit_3_base_224_1k.npz",
"deit3_B_224_21k": "gs://big_vision/zoo/deit3/bv_deit_3_base_224_21k.npz",
"deit3_B_384_1k": "gs://big_vision/zoo/deit3/bv_deit_3_base_384_1k.npz",
"deit3_B_384_21k": "gs://big_vision/zoo/deit3/bv_deit_3_base_384_21k.npz",
"deit3_L_224_1k": "gs://big_vision/zoo/deit3/bv_deit_3_large_224_1k.npz",
"deit3_L_224_21k": "gs://big_vision/zoo/deit3/bv_deit_3_large_224_21k.npz",
"deit3_L_384_1k": "gs://big_vision/zoo/deit3/bv_deit_3_large_384_1k.npz",
"deit3_L_384_21k": "gs://big_vision/zoo/deit3/bv_deit_3_large_384_21k.npz",
    # pylint: enable=line-too-long
}
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities shared across models."""
from absl import logging
import big_vision.utils as u
import flax.linen as nn
import jax
import jax.numpy as jnp
def merge_params(loaded, inited, dont_load=()):
"""Makes `loaded` pytree match `init`, warning or failing on mismatch.
Args:
loaded: pytree of parameters, typically loaded from a checkpoint.
    inited: pytree of parameters, typically coming from model init.
dont_load: List of regexes for parameters which shall not be taken
from `loaded`, either because they should remain at their init value,
or because they are missing on either side.
Returns:
    If successful, a new pytree which matches the structure of `inited`
but contains values from `loaded`, except for `dont_load`.
If structures don't match and mismatches are not covered by regexes in
`dont_load` argument, then raises an exception with more information.
"""
if inited is None: # A useful shortcut for example for colabs.
return loaded
dont_load = u.check_and_compile_patterns(dont_load)
def should_merge(name):
return not any(pattern.fullmatch(name) for pattern in dont_load)
loaded_flat, _ = u.tree_flatten_with_names(loaded)
inited_flat, _ = u.tree_flatten_with_names(inited)
loaded_flat = {k: v for k, v in loaded_flat}
inited_flat = {k: v for k, v in inited_flat}
# Let's first build the pytree from all common keys.
merged = {}
for name, init_val in inited_flat.items():
# param is present in both. Load or ignore it!
if name in loaded_flat and should_merge(name):
merged[name] = loaded_flat[name]
else:
logging.info("Ignoring checkpoint and using init value for %s", name)
merged[name] = init_val
def pp(title, names, indent=" "): # Just pretty-printing
if names:
return f"{title}:\n" + "\n".join(f"{indent}{k}" for k in sorted(names))
else:
return ""
# Now, if there are keys that only exist in inited or loaded, be helpful:
not_in_loaded = inited_flat.keys() - loaded_flat.keys()
not_in_inited = loaded_flat.keys() - inited_flat.keys()
logging.info(pp("Parameters in model but not in checkpoint", not_in_loaded))
logging.info(pp("Parameters in checkpoint but not in model", not_in_inited))
# And now see if any of them are not explicitly ignored => an error
not_in_loaded = {k for k in not_in_loaded if should_merge(k)}
not_in_inited = {k for k in not_in_inited if should_merge(k)}
if not_in_loaded or not_in_inited:
raise ValueError(
pp("Params in checkpoint", loaded_flat.keys()) + "\n" +
pp("Params in model (code)", inited_flat.keys()) + "\n" +
pp("Params in model (code) but not in checkpoint and not `dont_load`ed",
not_in_loaded, indent=" - ") + "\n" + # Special indent for tests.
pp("Params in checkpoint but not in model (code) and not `dont_load`ed",
not_in_inited, indent=" + ")) # Special indent for tests.
return u.recover_tree(merged.keys(), merged.values())
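# Minimal usage sketch (illustrative, not original code): a typical transfer
# setup loads the backbone from the checkpoint but keeps the freshly
# initialized head by listing it in `dont_load`.
def _merge_params_demo():  # Hypothetical helper, for illustration only.
  loaded = {"backbone": {"kernel": 1.0}, "head": {"kernel": 2.0}}
  inited = {"backbone": {"kernel": 0.0}, "head": {"kernel": 0.0}}
  merged = merge_params(loaded, inited, dont_load=("head/.*",))
  assert merged["backbone"]["kernel"] == 1.0  # Taken from the checkpoint.
  assert merged["head"]["kernel"] == 0.0  # Kept at its init value.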
class AddPositionEmbs(nn.Module):
"""Adds positional embeddings to the inputs, supports caching for decode.
Attributes:
decode: whether to run in single-position autoregressive mode.
"""
decode: bool = False
@nn.compact
def __call__(self, inputs, posemb):
"""Applies AddPositionEmbs module.
Adds posemb to the inputs, supports single-position autoregressive mode.
Args:
inputs: input data [batch_size, seq_len, emb_dim].
posemb: positional embeddings.
Returns:
output: inputs modulated by pos-embeddings [batch_size, seq_len, emb_dim].
"""
assert inputs.ndim == 3, f"Unexpected inputs shape: {inputs.shape}"
_, seq_len, emb_dim = inputs.shape
pe = posemb[:, :seq_len, :]
if self.decode:
is_initialized = self.has_variable("cache", "cache_index")
# We use a cache position index for tracking decoding position.
cache_index = self.variable("cache", "cache_index",
lambda: jnp.array(0, dtype=jnp.uint32))
if is_initialized:
i = cache_index.value
cache_index.value = i + 1
# Returns posemb[0, i, :], the positional embedding for the
# current decoding position.
pe = jax.lax.dynamic_slice(posemb,
start_indices=jnp.array((0, i, 0)),
slice_sizes=(1, 1, emb_dim))
return inputs + pe
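# Minimal usage sketch (illustrative, not original code): in non-decode mode
# the module simply adds a prefix of the posemb table to the inputs.
def _add_position_embs_demo():  # Hypothetical helper, for illustration only.
  inputs = jnp.zeros((2, 5, 8))
  posemb = jnp.ones((1, 16, 8))
  y = AddPositionEmbs().apply({}, inputs, posemb)  # No params to pass.
  assert y.shape == (2, 5, 8) and bool((y == 1.0).all())
  return y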
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet V1 with GroupNorm."""
from typing import Optional, Sequence, Union
from big_vision import utils
from big_vision.models import common
import flax
import flax.linen as nn
import flax.training.checkpoints
import jax.numpy as jnp
import numpy as np
def weight_standardize(w, axis, eps):
w = w - jnp.mean(w, axis=axis)
w = w / (jnp.std(w, axis=axis) + eps)
return w
class StdConv(nn.Conv):
def param(self, name, *a, **kw):
param = super().param(name, *a, **kw)
if name == "kernel":
param = weight_standardize(param, axis=[0, 1, 2], eps=1e-5)
return param
class ResidualUnit(nn.Module):
"""Bottleneck ResNet block."""
nmid: Optional[int] = None
strides: Sequence[int] = (1, 1)
@nn.compact
def __call__(self, x):
nmid = self.nmid or x.shape[-1] // 4
nout = nmid * 4
residual = x
if x.shape[-1] != nout or self.strides != (1, 1):
residual = StdConv(nout, (1, 1), self.strides, use_bias=False,
name="conv_proj")(residual)
residual = nn.GroupNorm(name="gn_proj")(residual)
y = StdConv(nmid, (1, 1), use_bias=False, name="conv1")(x)
y = nn.GroupNorm(name="gn1")(y)
y = nn.relu(y)
y = StdConv(nmid, (3, 3), self.strides, use_bias=False, name="conv2")(y)
y = nn.GroupNorm(name="gn2")(y)
y = nn.relu(y)
y = StdConv(nout, (1, 1), use_bias=False, name="conv3")(y)
y = nn.GroupNorm(name="gn3", scale_init=nn.initializers.zeros)(y)
y = nn.relu(residual + y)
return y
class ResNetStage(nn.Module):
"""One stage of ResNet."""
block_size: int
first_stride: Sequence[int] = (1, 1)
nmid: Optional[int] = None
@nn.compact
def __call__(self, x):
x = ResidualUnit(self.nmid, strides=self.first_stride, name="unit1")(x)
for i in range(1, self.block_size):
x = ResidualUnit(self.nmid, name=f"unit{i + 1}")(x)
return x
class Model(nn.Module):
"""ResNetV1."""
num_classes: Optional[int] = None
width: float = 1
depth: Union[int, Sequence[int]] = 50
@nn.compact
def __call__(self, image, *, train=False):
del train # Unused
blocks = get_block_desc(self.depth)
width = int(64 * self.width)
out = {}
# Root block
x = StdConv(width, (7, 7), (2, 2), use_bias=False, name="conv_root")(image)
x = nn.GroupNorm(name="gn_root")(x)
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding="SAME")
out["stem"] = x
# Stages
x = ResNetStage(blocks[0], nmid=width, name="block1")(x)
out["stage1"] = x
for i, block_size in enumerate(blocks[1:], 1):
x = ResNetStage(block_size, nmid=width * 2 ** i,
first_stride=(2, 2), name=f"block{i + 1}")(x)
out[f"stage{i + 1}"] = x
out["pre_logits_2d"] = x
# Head
x = out["pre_logits"] = jnp.mean(x, axis=(1, 2))
if self.num_classes:
head = nn.Dense(self.num_classes, name="head",
kernel_init=nn.initializers.zeros)
out["logits_2d"] = head(out["pre_logits_2d"])
x = out["logits"] = head(out["pre_logits"])
return x, out
# A dictionary mapping the number of layers in a resnet to the number of
# blocks in each stage of the model.
# NOTE: Does not include 18/34, as those also need the non-bottleneck block!
def get_block_desc(depth):
if isinstance(depth, list): # Be robust to silly mistakes.
depth = tuple(depth)
return {
26: [2, 2, 2, 2], # From timm, gets ~75% on ImageNet.
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}.get(depth, depth)
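# Minimal usage sketch (illustrative, not original code): known depths map to
# the standard per-stage block counts, while a sequence of block counts is
# passed through, allowing custom stage layouts.
def _get_block_desc_demo():  # Hypothetical helper, for illustration only.
  assert get_block_desc(50) == [3, 4, 6, 3]
  assert get_block_desc([1, 2, 3, 4]) == (1, 2, 3, 4)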
def fix_old_checkpoints(params):
"""Modifies params from old checkpoints to run with current implementation."""
params = flax.core.unfreeze(
flax.training.checkpoints.convert_pre_linen(params))
# Old linen used to store non-squeezed GN params.
params = flax.traverse_util.unflatten_dict({
k: np.squeeze(v) if (set(k)
& {"gn_root", "gn_proj", "gn1", "gn2", "gn3"}) else v
for k, v in flax.traverse_util.flatten_dict(params).items()
})
return params
def load(init_params, init_file, model_cfg, dont_load=()):
"""Load init from checkpoint."""
del model_cfg # Unused
params = utils.load_params(None, init_file)
params = common.merge_params(params, init_params, dont_load)
params = fix_old_checkpoints(params)
return params
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLP-Mixer model."""
from typing import Optional, Tuple
from absl import logging
from big_vision import utils
from big_vision.models import common
import einops
import flax.linen as nn
import jax
import jax.numpy as jnp
class MlpBlock(nn.Module):
mlp_dim: int
@nn.compact
def __call__(self, x):
y = nn.Dense(self.mlp_dim)(x)
y = nn.gelu(y)
return nn.Dense(x.shape[-1])(y)
class MixerBlock(nn.Module):
"""Mixer block layer."""
tokens_mlp_dim: int
channels_mlp_dim: int
drop_p: float
@nn.compact
def __call__(self, x, *, train=False):
y = nn.LayerNorm()(x)
y = jnp.swapaxes(y, 1, 2)
y = MlpBlock(self.tokens_mlp_dim, name="token_mixing")(y)
y = jnp.swapaxes(y, 1, 2)
x = x + y * _stoch_depth_mask(x, self.drop_p, not train, self.make_rng)
y = nn.LayerNorm()(x)
y = MlpBlock(self.channels_mlp_dim, name="channel_mixing")(y)
return x + y * _stoch_depth_mask(x, self.drop_p, not train, self.make_rng)
class MlpMixer(nn.Module):
"""Mixer architecture."""
patch_size: Tuple[int, int]
num_classes: Optional[int]
num_blocks: int
hidden_dim: int
tokens_mlp_dim: int
channels_mlp_dim: int
model_name: Optional[str] = None
stoch_depth: float = 0.0
@nn.compact
def __call__(self, image, *, train=False):
out = {}
x = out["stem"] = nn.Conv(self.hidden_dim, self.patch_size,
strides=self.patch_size, name="stem")(image)
x = out["input_tokens"] = einops.rearrange(x, "n h w c -> n (h w) c")
for i in range(self.num_blocks):
drop_p = (i / max(self.num_blocks - 1, 1)) * self.stoch_depth
x = out[f"block_{i}"] = MixerBlock(
self.tokens_mlp_dim, self.channels_mlp_dim, drop_p)(x, train=train)
x = nn.LayerNorm(name="pre_head_layer_norm")(x)
x = out["pre_logits"] = jnp.mean(x, axis=1)
if self.num_classes:
x = out["logits"] = nn.Dense(
self.num_classes, kernel_init=nn.initializers.zeros, name="head")(x)
return x, out
def Model(num_classes=None, *, variant=None, **kw): # pylint: disable=invalid-name
"""Factory function to easily create a Model variant like "L/16"."""
if variant is not None:
model_size, patch = variant.split("/")
kw.setdefault("patch_size", (int(patch), int(patch)))
config = {
"S": {
"hidden_dim": 512,
"num_blocks": 8,
"channels_mlp_dim": 2048,
"tokens_mlp_dim": 256
},
"B": {
"hidden_dim": 768,
"num_blocks": 12,
"channels_mlp_dim": 3072,
"tokens_mlp_dim": 384
},
"L": {
"hidden_dim": 1024,
"num_blocks": 24,
"channels_mlp_dim": 4096,
"tokens_mlp_dim": 512
},
"H": {
"hidden_dim": 1280,
"num_blocks": 32,
"channels_mlp_dim": 5120,
"tokens_mlp_dim": 640
},
}[model_size]
for k, v in config.items():
kw.setdefault(k, v)
logging.info("Mixer config: %s", kw)
return MlpMixer(num_classes=num_classes, **kw)
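# Minimal usage sketch (illustrative, not original code): "B/16" builds a
# Mixer-B with 16x16 patches; explicit keyword arguments still win over the
# variant table because of the setdefault calls above.
def _mixer_variant_demo():  # Hypothetical helper, for illustration only.
  model = Model(num_classes=1000, variant="B/16")
  assert model.hidden_dim == 768 and model.patch_size == (16, 16)
  return model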
def load(init_params, init_file, model_cfg, dont_load=()):
"""Load checkpoint."""
del model_cfg
# Shortcut names for some canonical paper checkpoints:
init_file = {
# pylint: disable=line-too-long
# Pretrained models from the MLP-Mixer paper: https://arxiv.org/abs/2105.01601.
"B-i1k/16": "gs://mixer_models/imagenet1k/Mixer-B_16.npz",
"L-i1k/16": "gs://mixer_models/imagenet1k/Mixer-L_16.npz",
"B-i21k/16": "gs://mixer_models/imagenet21k/Mixer-B_16.npz",
"L-i21k/16": "gs://mixer_models/imagenet21k/Mixer-L_16.npz",
# pylint: enable=line-too-long
}.get(init_file, init_file)
restored_params = utils.load_params(None, init_file)
# possibly use the random init for some of the params (such as, the head).
restored_params = common.merge_params(restored_params, init_params, dont_load)
return restored_params
def _stoch_depth_mask(x, drop_p, deterministic, make_rng):
if not deterministic and drop_p:
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
return 1.0 - jax.random.bernoulli(make_rng("dropout"), drop_p, shape)
return 1.0
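# Minimal usage sketch (illustrative, not original code): at eval time the
# mask is the constant 1.0; at train time each example's residual branch is
# zeroed with probability drop_p (note: no 1/(1-p) rescaling is applied).
def _stoch_depth_mask_demo():  # Hypothetical helper, for illustration only.
  mask = _stoch_depth_mask(jnp.ones((4, 3)), 0.5, deterministic=True,
                           make_rng=None)
  assert mask == 1.0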
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model definition to train a single ViT model with the contrastive trainer."""
import importlib
from typing import Optional, Any
from big_vision import utils
import flax.linen as nn
import jax.numpy as jnp
ConfigDict = Any
class Model(nn.Module):
"""Single ViT to encode regular images and text images."""
image: Optional[ConfigDict] = None
image_model: str = "vit"
out_dim: int = 768
temperature_init: float = 10.0
@nn.compact
def __call__(self, image, text=None, **kw):
"""Returns (B, C) image and (B, C) text representations, and some extras."""
ztxt, zimg = None, None
kw = kw or {}
image_model = importlib.import_module(
f"big_vision.models.{self.image_model}"
).Model(**{"num_classes": self.out_dim, **(self.image or {})}, name="img") # pylint: disable=not-a-mapping
def _compute_embedding(input_image, prefix):
zemb, out_emb = image_model(input_image, **kw)
out = {f"{prefix}/{k}": v for k, v in out_emb.items()}
# Normalize the embeddings.
out[f"{prefix}/norm"] = jnp.linalg.norm(zemb, axis=1, keepdims=True)
out[f"{prefix}/normalized"] = zemb = zemb / (out[f"{prefix}/norm"] + 1e-8)
return zemb, out
out = {}
if image is not None:
zimg, out_img = _compute_embedding(image, "img")
out.update(out_img)
if text is not None:
ztxt, out_txt = _compute_embedding(text, "txt")
out.update(out_txt)
temp_init = jnp.log(self.temperature_init)
t = self.param("t",
lambda key, shape, dtype: temp_init*jnp.ones(shape, dtype),
(1,), jnp.float32)
out["t"] = jnp.exp(t)
out["t/parameter"] = t
return zimg, ztxt, out
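# Minimal usage sketch (illustrative, not original code): the normalized
# embeddings and learned temperature above combine into the usual contrastive
# logit matrix (temperature-scaled cosine similarities).
def _contrastive_logits_demo(zimg, ztxt, t):  # Hypothetical helper.
  # zimg: (B, C) normalized image embs; ztxt: (B, C) normalized text embs;
  # t: the (1,) exponentiated temperature, i.e. out["t"].
  return t * jnp.dot(zimg, ztxt.T)  # (B, B) pairwise similarities.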
def load(init_params, init_files, model_cfg, img_load_kw={}): # pylint: disable=dangerous-default-value
"""Loads the ViT parameters - adapted from proj/image_text/two_towers.py."""
if isinstance(init_files, str):
# A shortcut for a single file checkpoint of a two_towers model.
init_files = {k: f"{init_files}:{k}" for k in ("img", "t")}
else:
init_files = {**init_files} # Shallow copy because we'll pop stuff off.
restored_params = {**init_params}
img_init = init_files.pop("image", init_files.pop("img", None))
if img_init:
restored_params["img"] = importlib.import_module(
f"big_vision.models.{model_cfg.image_model}"
).load(init_params["img"], img_init, model_cfg.image, **img_load_kw)
t_init = init_files.pop("temperature", init_files.pop("t", None))
if t_init:
restored_params["t"] = utils.load_params(None, t_init)
assert not init_files, (
f"There's something unused left in `config.model_init`. You probably got "
f"a typo. Here it is: {init_files}")
return restored_params
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bert."""
import tempfile
from big_vision import input_pipeline
from big_vision.models.proj.flaxformer import bert
from big_vision.models.proj.flaxformer import bert_test_util
import big_vision.pp.builder as pp_builder
import big_vision.pp.ops_general # pylint: disable=unused-import
import big_vision.pp.proj.flaxformer.bert_ops # pylint: disable=unused-import
import jax
import jax.numpy as jnp
import tensorflow as tf
# BERT vocabulary for testing.
_BERT_VOCAB = [
"[PAD]",
"[UNK]",
"this",
"is",
"a",
"test",
"[CLS]",
"[SEP]",
]
_TOKEN_LEN = 16
class BertTest(tf.test.TestCase):
def test_load_apply(self):
inkey = "text"
vocab_path = f"{tempfile.mkdtemp()}/vocab.txt"
with open(vocab_path, "w") as f:
f.write("\n".join(_BERT_VOCAB))
ds2, _ = input_pipeline.make_for_inference(
tf.data.Dataset.from_tensor_slices(
{inkey: tf.ragged.constant([["this is a test"]])}),
num_ex_per_process=[1],
preprocess_fn=pp_builder.get_preprocess_fn(
f"bert_tokenize(inkey='{inkey}', vocab_path='{vocab_path}', "
f"max_len={_TOKEN_LEN})"
"|keep('labels')"),
batch_size=1,
)
text = jnp.array(next(iter(ds2))["labels"])
model = bert.Model(config="base")
variables = model.init(jax.random.PRNGKey(0), text)
params = bert.load(variables.unfreeze()["params"],
bert_test_util.create_base_checkpoint())
x, out = model.apply({"params": params}, text)
self.assertAllEqual(jax.tree_map(jnp.shape, x), (1, 768))
self.assertAllEqual(
jax.tree_map(jnp.shape, out), {
"transformed": (1, 16, 768),
"pre_logits": (1, 768),
})
if __name__ == "__main__":
tf.test.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for fake BERT checkpoint."""
import tempfile
import tensorflow.compat.v1 as tf
# Checkpoint structure was extracted with the following (Colab) snippet:
#
# !wget https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-768_A-12.zip # pylint: disable=line-too-long
# !unzip uncased_L-12_H-768_A-12.zip
#
# import tensorflow.compat.v1 as tf
#
# ckpt_reader = tf.train.load_checkpoint('bert_model.ckpt')
# tf_params = {
# tf_name: ckpt_reader.get_tensor(tf_name)
# for tf_name in ckpt_reader.get_variable_to_dtype_map()
# }
#
# 'shapes_dtypes = {\n%s\n}' % '\n'.join(
# f' "{k}": ({v.shape}, "{v.dtype}"),'
# for k, v, in tf_params.items()
# )
# pylint: disable=line-too-long
_BASE_SHAPES_DTYPES = {
"cls/seq_relationship/output_bias": ((2,), "float32"),
"cls/predictions/transform/LayerNorm/gamma": ((768,), "float32"),
"cls/predictions/transform/LayerNorm/beta": ((768,), "float32"),
"bert/pooler/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_9/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_9/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_3/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_7/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_9/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_7/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_9/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_9/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_9/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_9/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_9/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_8/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_4/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_8/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_8/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_11/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_11/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_8/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_8/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_8/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_1/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_8/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_3/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_8/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_8/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_8/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_7/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_7/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_8/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_7/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_7/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_9/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_7/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_7/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_6/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_6/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_6/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_0/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_6/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_7/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_4/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_2/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_5/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_5/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_9/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_3/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_8/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_5/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_5/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_5/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_4/output/dense/bias": ((768,), "float32"),
"bert/embeddings/token_type_embeddings": ((2, 768), "float32"),
"bert/encoder/layer_4/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_4/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_7/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_4/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_9/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_10/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_6/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_4/attention/self/query/bias": ((768,), "float32"),
"cls/seq_relationship/output_weights": ((2, 768), "float32"),
"bert/encoder/layer_7/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_4/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_4/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_4/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_3/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_1/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_2/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_8/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_4/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_3/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_4/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_3/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_1/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_3/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_10/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_3/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_1/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_0/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_10/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_3/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_3/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_1/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_3/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_1/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_3/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_2/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_6/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_11/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_2/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_2/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_6/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_11/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_6/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_11/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_11/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_11/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_10/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_11/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_6/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_6/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_11/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_10/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_4/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_11/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_10/attention/self/query/bias": ((768,), "float32"),
"bert/embeddings/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_2/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_11/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_11/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_5/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_3/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_10/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_10/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/embeddings/word_embeddings": ((30522, 768), "float32"),
"bert/encoder/layer_9/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_9/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_6/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_10/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_6/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_1/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_5/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_2/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_0/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_3/intermediate/dense/kernel": ((768, 3072), "float32"),
"cls/predictions/output_bias": ((30522,), "float32"),
"bert/encoder/layer_0/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_6/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_0/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_10/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_5/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_4/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_0/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_0/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_10/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_7/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_3/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_2/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_8/output/dense/kernel": ((3072, 768), "float32"),
"bert/embeddings/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_1/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_10/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_6/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_2/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_11/attention/self/value/bias": ((768,), "float32"),
"bert/encoder/layer_9/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_0/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_10/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_10/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_1/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_8/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_0/intermediate/dense/bias": ((3072,), "float32"),
"bert/encoder/layer_1/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_1/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_7/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_8/attention/output/dense/bias": ((768,), "float32"),
"cls/predictions/transform/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_6/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_5/attention/self/key/kernel": ((768, 768), "float32"),
"bert/encoder/layer_0/attention/self/value/kernel": ((768, 768), "float32"),
"bert/encoder/layer_7/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_7/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_1/output/dense/kernel": ((3072, 768), "float32"),
"bert/encoder/layer_11/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_4/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_1/attention/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_9/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_2/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_0/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_10/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_1/attention/self/query/bias": ((768,), "float32"),
"bert/encoder/layer_3/output/LayerNorm/beta": ((768,), "float32"),
"bert/encoder/layer_6/attention/output/dense/kernel": ((768, 768), "float32"),
"bert/encoder/layer_1/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_11/output/dense/bias": ((768,), "float32"),
"cls/predictions/transform/dense/bias": ((768,), "float32"),
"bert/encoder/layer_0/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_11/attention/self/query/kernel": ((768, 768), "float32"),
"bert/encoder/layer_0/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_0/attention/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_7/attention/output/LayerNorm/gamma": ((768,), "float32"),
"bert/encoder/layer_4/attention/self/key/bias": ((768,), "float32"),
"bert/encoder/layer_10/attention/self/key/kernel": ((768, 768), "float32"),
"bert/embeddings/position_embeddings": ((512, 768), "float32"),
"bert/encoder/layer_1/output/dense/bias": ((768,), "float32"),
"bert/encoder/layer_9/intermediate/dense/kernel": ((768, 3072), "float32"),
"bert/encoder/layer_0/output/LayerNorm/beta": ((768,), "float32"),
"bert/pooler/dense/bias": ((768,), "float32"),
"bert/encoder/layer_0/attention/output/LayerNorm/beta": ((768,), "float32"),
}
# pylint: enable=line-too-long
def create_base_checkpoint():
"""Returns path to fake Bert "base" checkpoint directory (zero init)."""
directory = tempfile.mkdtemp()
path = f"{directory}/bert_model.ckpt"
with tf.Session() as sess:
for name, (shape, dtype) in _BASE_SHAPES_DTYPES.items():
tf.Variable(tf.zeros(shape, dtype), name=name)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.save(sess, path)
return directory
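# A minimal usage sketch (assumes this module's imports, e.g. `tempfile` and
# `tensorflow.compat.v1 as tf`): create the fake checkpoint and verify the
# saved variables against the expected shapes.
#
#   ckpt_dir = create_base_checkpoint()
#   reader = tf.train.load_checkpoint("{}/bert_model.ckpt".format(ckpt_dir))
#   shape_map = reader.get_variable_to_shape_map()
#   for name, (shape, _) in _BASE_SHAPES_DTYPES.items():
#     assert tuple(shape_map[name]) == shape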
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT encoder, optionally loading pre-trained checkpoints."""
import dataclasses
from typing import Optional
from absl import logging
from big_vision import utils
from big_vision.models import common
import flax
import flax.linen as nn
import jax.numpy as jnp
from tensorflow.io import gfile
from flaxformer.architectures.bert import bert
from flaxformer.architectures.bert import bert_checkpoint_converter
from flaxformer.architectures.bert import configs
class Model(nn.Module):
"""BERT encoder with linear projection on last layer CLS token."""
config: str
num_classes: Optional[int] = None
head_zeroinit: bool = True
@nn.compact
def __call__(self, text, *, train=False):
out = {}
batch_size, max_len = text.shape
bert_model = bert.BertEncoder(**dataclasses.asdict({
"base": configs.BertBaseConfig(),
"large": configs.BertLargeConfig(),
}[self.config]))
x = out["transformed"] = bert_model(
token_ids=text,
position_ids=jnp.tile(
jnp.arange(0, max_len, dtype=jnp.int32), [batch_size, 1]),
segment_ids=jnp.zeros([batch_size, max_len], dtype=jnp.int32),
input_mask=text.astype(jnp.bool_).astype(jnp.int32),
enable_dropout=train,
)
x = out["pre_logits"] = x[:, 0] # CLS token
if self.num_classes:
kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {}
x = out["logits"] = nn.Dense(self.num_classes, name="head", **kw)(x)
return x, out
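# Minimal usage sketch (hypothetical batch of token ids; `import jax` is not
# in this module, so it is spelled out): the model returns the projected CLS
# embedding plus a dict of intermediate outputs.
#
#   import jax
#   model = Model(config="base", num_classes=128)
#   text = jnp.ones([2, 16], dtype=jnp.int32)
#   variables = model.init(jax.random.PRNGKey(0), text)
#   z, out = model.apply(variables, text)  # z: [2, 128], out["pre_logits"]: [2, 768]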
def load(params, path, model_cfg=None, dont_load=()):
"""Returns `params` with BERT weights replaced from checkpoint at `path`."""
del model_cfg
checkpoint_path = f"{path}/bert_model.ckpt"
if gfile.exists(f"{checkpoint_path}.index"):
logging.info("Loading original BERT checkpoint from '%s'", checkpoint_path)
params = flax.core.FrozenDict(params).unfreeze() # Recursive copy.
max_len = (
params["BertEncoder_0"]["embedder"]["embedders_position_ids"]
["embedding"].shape[0])
    bert_params, pooler_params = (
        bert_checkpoint_converter.load_params_from_tf_checkpoint(
            checkpoint_path=checkpoint_path))
del pooler_params
if isinstance(bert_params, flax.core.FrozenDict):
bert_params = bert_params.unfreeze()
bert_params["embedder"]["embedders_position_ids"]["embedding"] = (
bert_params["embedder"]["embedders_position_ids"]["embedding"][:max_len]
)
return common.merge_params(
{"BertEncoder_0": bert_params}, params, dont_load)
logging.info(
"Could not find original BERT checkpoint path '%s', "
"loading big_vision checkpoint '%s'", checkpoint_path, path)
restored_params = utils.load_params(None, path)
return common.merge_params(restored_params, params, dont_load)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the FlexiViT model."""
from absl.testing import absltest
from big_vision.models.proj.flexi import vit
import jax
from jax import config
from jax import numpy as jnp
import numpy as np
import tensorflow as tf
config.update("jax_enable_x64", True)
class PatchEmbTest(absltest.TestCase):
def _test_patch_emb_resize(self, old_shape, new_shape, n_patches=100):
# This test verifies that if we resize the input image patch and resample
# the patch embedding accordingly, the output does not change.
# NOTE: if the image contains more than one patch, then the embeddings will
# change due to patch interaction during the resizing.
patch_shape = old_shape[:-2]
resized_patch_shape = new_shape[:-2]
patches = np.random.randn(n_patches, *old_shape[:-1])
w_emb = jnp.asarray(np.random.randn(*old_shape))
old_embeddings = jax.lax.conv_general_dilated(
patches, w_emb, window_strides=patch_shape, padding="VALID",
dimension_numbers=("NHWC", "HWIO", "NHWC"), precision="highest")
patch_resized = tf.image.resize(
tf.constant(patches), resized_patch_shape, method="bilinear").numpy()
patch_resized = jnp.asarray(patch_resized).astype(jnp.float64)
w_emb_resampled = vit.resample_patchemb(w_emb, resized_patch_shape)
self.assertEqual(w_emb_resampled.shape, new_shape)
new_embeddings = jax.lax.conv_general_dilated(
patch_resized, w_emb_resampled, window_strides=resized_patch_shape,
padding="VALID", dimension_numbers=("NHWC", "HWIO", "NHWC"),
precision="highest")
self.assertEqual(old_embeddings.shape, new_embeddings.shape)
np.testing.assert_allclose(
old_embeddings, new_embeddings, rtol=1e-1, atol=1e-4)
def test_resize_square(self):
out_channels = 256
patch_sizes = [48, 40, 30, 24, 20, 16, 15, 12, 10, 8, 6, 5]
for s in patch_sizes:
old_shape = (s, s, 3, out_channels)
for t in patch_sizes:
new_shape = (t, t, 3, out_channels)
if s <= t:
self._test_patch_emb_resize(old_shape, new_shape)
def test_resize_rectangular(self):
out_channels = 256
old_shape = (8, 10, 3, out_channels)
new_shape = (10, 12, 3, out_channels)
self._test_patch_emb_resize(old_shape, new_shape)
old_shape = (8, 6, 3, out_channels)
new_shape = (9, 15, 3, out_channels)
self._test_patch_emb_resize(old_shape, new_shape)
old_shape = (8, 6, 3, out_channels)
new_shape = (15, 9, 3, out_channels)
self._test_patch_emb_resize(old_shape, new_shape)
def test_input_channels(self):
out_channels = 256
for c in [1, 3, 10]:
old_shape = (8, 10, c, out_channels)
new_shape = (10, 12, c, out_channels)
self._test_patch_emb_resize(old_shape, new_shape)
def _test_works(self, old_shape, new_shape):
old = jnp.asarray(np.random.randn(*old_shape))
resampled = vit.resample_patchemb(old, new_shape[:2])
self.assertEqual(resampled.shape, new_shape)
self.assertEqual(resampled.dtype, old.dtype)
def test_downsampling(self):
# NOTE: for downsampling we cannot guarantee that the outputs would match
# before and after downsampling. So, we simply test that the code runs and
# produces an output of the correct shape and type.
out_channels = 256
for t in [4, 5, 6, 7]:
for c in [1, 3, 5]:
old_shape = (8, 8, c, out_channels)
new_shape = (t, t, c, out_channels)
self._test_works(old_shape, new_shape)
def _test_raises(self, old_shape, new_shape):
old = jnp.asarray(np.random.randn(*old_shape))
with self.assertRaises(AssertionError):
vit.resample_patchemb(old, new_shape)
def test_raises_incorrect_dims(self):
old_shape = (8, 10, 3, 256)
new_shape = (10, 12, 1, 256)
self._test_raises(old_shape, new_shape)
old_shape = (8, 10, 1, 256)
new_shape = (10, 12, 3, 256)
self._test_raises(old_shape, new_shape)
old_shape = (8, 10, 3, 128)
new_shape = (10, 12, 3, 256)
self._test_raises(old_shape, new_shape)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A version of ViT with flexible seqlen ((internal link))."""
from typing import Optional, Sequence
from absl import logging
from big_vision import utils
from big_vision.models import common
from big_vision.models import vit
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
def resample_patchemb(old, new_hw):
"""Resample the weights of the patch embedding kernel to target resolution.
We resample the patch embedding kernel by approximately inverting the effect
of patch resizing. Colab with detailed explanation:
(internal link)
  With this resizing, we can for example load a B/8 filter into a B/16 model
  and, on a 2x larger input image, the result will match.
See (internal link)
Args:
old: original parameter to be resized.
    new_hw: target (height, width) of the kernel's spatial dimensions.
Returns:
Resized patch embedding kernel.
"""
assert len(old.shape) == 4, "Four dimensions expected"
assert len(new_hw) == 2, "New shape should only be hw"
if tuple(old.shape[:2]) == tuple(new_hw):
return old
logging.info("FlexiViT: resize embedding %s to %s", old.shape, new_hw)
def resize(x_np, new_shape):
x_tf = tf.constant(x_np)[None, ..., None]
# NOTE: we are using tf.image.resize here to match the resize operations in
# the data preprocessing pipeline.
x_upsampled = tf.image.resize(
x_tf, new_shape, method="bilinear")[0, ..., 0].numpy()
return x_upsampled
def get_resize_mat(old_shape, new_shape):
mat = []
for i in range(np.prod(old_shape)):
basis_vec = np.zeros(old_shape)
basis_vec[np.unravel_index(i, old_shape)] = 1.
mat.append(resize(basis_vec, new_shape).reshape(-1))
return np.stack(mat).T
resize_mat = get_resize_mat(old.shape[:2], new_hw)
resize_mat_pinv = np.linalg.pinv(resize_mat.T)
def resample_kernel(kernel):
resampled_kernel = resize_mat_pinv @ kernel.reshape(-1)
return resampled_kernel.reshape(new_hw)
v_resample_kernel = jax.vmap(jax.vmap(resample_kernel, 2, 2), 3, 3)
return v_resample_kernel(old)
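# Usage sketch (hypothetical shapes): only the two spatial dimensions of the
# kernel change; input channels and output width are preserved.
#
#   old = jnp.zeros((4, 4, 3, 8))        # a 4x4-patch embedding kernel
#   new = resample_patchemb(old, (8, 8))
#   assert new.shape == (8, 8, 3, 8)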
class Patchify(nn.Module):
"""As a class just to match param names with original ViT."""
patch_size: Sequence[int] = (32, 32)
width: int = 768
seqhw: Optional[int] = None
@nn.compact
def __call__(self, image, seqhw=None):
n, h, w, c = image.shape # pylint: disable=unused-variable
w_emb = self.param(
"kernel", nn.initializers.normal(stddev=1/np.sqrt(self.width)),
(*self.patch_size, c, self.width), image.dtype)
b_emb = self.param("bias", nn.initializers.zeros, self.width, image.dtype)
# Compute required patch-size to reach `seqhw` given `image` size.
seqhw = seqhw or self.seqhw
if seqhw is None and self.is_initializing():
patch_size = self.patch_size
else:
patch_size = tuple(np.array((h, w)) // np.array((seqhw, seqhw)))
if patch_size != self.patch_size:
w_emb = resample_patchemb(old=w_emb, new_hw=patch_size)
x = jax.lax.conv_general_dilated(
image, w_emb, window_strides=patch_size, padding="VALID",
dimension_numbers=("NHWC", "HWIO", "NHWC"))
return x + b_emb
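# Worked example of the patch-size arithmetic above (a sketch): for a 240x240
# image and seqhw=15, patch_size = (240 // 15, 240 // 15) = (16, 16), i.e. a
# 15x15 grid of 16x16 patches; the stored kernel is then resampled from
# self.patch_size to (16, 16) whenever the two differ.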
class _Model(nn.Module):
"""ViT model."""
num_classes: int
patch_size: Sequence[int] = (32, 32)
posemb_size: Sequence[int] = (7, 7)
width: int = 768
depth: int = 12
mlp_dim: Optional[int] = None # Defaults to 4x input dim
num_heads: int = 12
posemb: str = "learn" # Can also be "sincos2d"
pool_type: str = "gap" # Can also be "map" or "tok"
head_zeroinit: bool = True
seqhw: Optional[int] = None
@nn.compact
def __call__(self, image, *, seqhw=None, train=False):
out = {}
x = out["stem"] = Patchify(
self.patch_size, self.width, self.seqhw, name="embedding")(image, seqhw)
# == Flattening + posemb
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
pos_emb = vit.get_posemb(
self, self.posemb, self.posemb_size, c, "pos_embedding", x.dtype)
if pos_emb.shape[1] != h * w:
pos_emb = jnp.reshape(pos_emb, (1, *self.posemb_size, c))
pos_emb = jax.image.resize(pos_emb, (1, h, w, c), "linear")
pos_emb = jnp.reshape(pos_emb, (1, h * w, c))
x = out["with_posemb"] = x + pos_emb
# == Optional [cls] token
if self.pool_type == "tok":
cls = self.param("cls", nn.initializers.zeros, (1, 1, c), x.dtype)
x = jnp.concatenate([jnp.tile(cls, [n, 1, 1]), x], axis=1)
# == Encoder
n, l, c = x.shape # pylint: disable=unused-variable
x, out["encoder"] = vit.Encoder(
depth=self.depth,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
name="Transformer")(x)
encoded = out["encoded"] = x
if self.pool_type == "map":
x = out["head_input"] = vit.MAPHead(
num_heads=self.num_heads, mlp_dim=self.mlp_dim)(x)
elif self.pool_type == "gap":
x = out["head_input"] = jnp.mean(x, axis=1)
elif self.pool_type == "tok":
x = out["head_input"] = x[:, 0]
encoded = encoded[:, 1:]
else:
raise ValueError(f"Unknown pool type: '{self.pool_type}'")
x_2d = jnp.reshape(encoded, [n, h, w, -1])
out["pre_logits_2d"] = x_2d
out["pre_logits"] = x
if self.num_classes:
kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {}
head = nn.Dense(self.num_classes, name="head", **kw)
x_2d = out["logits_2d"] = head(x_2d)
x = out["logits"] = head(x)
return x, out
def Model(num_classes, *, variant=None, **kw): # pylint: disable=invalid-name
"""Factory function, because linen really don't like what I'm doing!"""
return _Model(num_classes, **{**vit.decode_variant(variant), **kw})
def load(init_params, init_file, model_cfg, dont_load=()): # pylint: disable=invalid-name because we had to CamelCase above.
"""Load init from checkpoint, both old model and this one. +Hi-res posemb."""
init_file = {**vit.VANITY_NAMES, **VANITY_NAMES}.get(init_file, init_file)
restored_params = utils.load_params(None, init_file)
restored_params = vit.fix_old_checkpoints(restored_params)
  # Potentially resize the position embeddings if seqlen differs.
restored_params["pos_embedding"] = vit.resample_posemb(
old=restored_params["pos_embedding"],
new=init_params["pos_embedding"])
# Potentially resize the patch embedding kernel.
old_patchemb = restored_params["embedding"]["kernel"]
restored_params["embedding"]["kernel"] = resample_patchemb(
old=old_patchemb, new_hw=model_cfg.patch_size)
  # Possibly use the random init for some of the params (e.g., the head).
restored_params = common.merge_params(restored_params, init_params, dont_load)
return restored_params
# Shortcut names for some canonical paper checkpoints:
VANITY_NAMES = {
# pylint: disable=line-too-long
"FlexiViT-L i1k": "gs://big_vision/flexivit/flexivit_l_i1k.npz",
"FlexiViT-B i1k": "gs://big_vision/flexivit/flexivit_b_i1k.npz",
"FlexiViT-S i1k": "gs://big_vision/flexivit/flexivit_s_i1k.npz",
"FlexiViT-B i21k 90ep": "gs://big_vision/flexivit/flexivit_b_i21k_90ep.npz",
"FlexiViT-B i21k 300ep": "gs://big_vision/flexivit/flexivit_b_i21k_300ep.npz",
"FlexiViT-B i21k 1000ep": "gs://big_vision/flexivit/flexivit_b_i21k_1000ep.npz",
"ViT-B/16 i21k": "gs://big_vision/flexivit/vit_b16_i21k_300ep.npz",
"ViT-B/30 i21k": "gs://big_vision/flexivit/vit_b30_i21k_300ep.npz",
# pylint: enable=line-too-long
}
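# Usage sketch (hypothetical config; `init_params` is a freshly initialized
# param pytree with "pos_embedding" and "embedding" entries): resolve a vanity
# name and load the checkpoint, resampling posemb and patchemb as needed.
#
#   import ml_collections
#   cfg = ml_collections.ConfigDict({"patch_size": (16, 16)})
#   params = load(init_params, "FlexiViT-B i1k", cfg)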
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vit vqvae model."""
from absl.testing import absltest
from big_vision.models.proj.uvim import vit
import jax
import jax.numpy as jnp
import ml_collections
class ViTVQVAEModelTest(absltest.TestCase):
def test_model(self):
model_config = ml_collections.ConfigDict({
"input_size": (32, 32),
"code_len": 4,
"width": 16,
"mlp_dim": 64,
"num_heads": 4,
"enc_depth": 1,
"dec_depth": 1,
"with_encoder_ctx": True,
"with_decoder_ctx": True,
"statistics_axis_name": None,
"inputs": {
"in1": (10, 3),
"in2": (25,),
},
"outputs": {
"out1": (5,),
"out2": (20,),
},
})
model = vit.Model(**model_config)
batch_size = 4
seq_len = (32 // 8) ** 2
x = {
"in1": jnp.zeros((batch_size, seq_len, 10, 3)),
"in2": jnp.zeros((batch_size, seq_len, 25)),
}
ctx_image = jnp.zeros((batch_size,) + model_config.input_size + (3,))
init_rngs = {
"params": jax.random.PRNGKey(0),
"state": jax.random.PRNGKey(1),
}
params = model.init(init_rngs, x, ctx=ctx_image)
self.assertEqual(params.keys(), set(["params", "state"]))
apply_rngs = {
"dropout": jax.random.PRNGKey(0),
"vqvae": jax.random.PRNGKey(0),
}
(logits, _), params = model.apply(
params, x, ctx=ctx_image, train=True, update_dict=True,
rngs=apply_rngs, mutable=["state"])
self.assertEqual(logits.keys(), set(["out1", "out2"]))
self.assertEqual(logits["out1"].shape, (batch_size, seq_len, 5))
self.assertEqual(logits["out2"].shape, (batch_size, seq_len, 20))
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple vision-text transformer with encoder-decoder architecture.
Used abbreviations for dimension annotations:
B: batch size.
H: image height.
W: image width.
P: number of patches (PH/PW: number of patches in height/width dimensions).
E: embedding size.
L: sequence length of text tokens.
V: vocab size.
"""
from typing import Sequence
from big_vision import utils
from big_vision.models import common
from big_vision.models import vit
import einops
import flax
import flax.linen as nn
import jax.numpy as jnp
import ml_collections
import numpy as np
def shift_right(x, axis=1):
  """Shifts `x` one position to the right along `axis`, padding with 0."""
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[axis] = (1, 0)
  padded = jnp.pad(x, pad_widths, constant_values=0)
  # Drop the last element along `axis` so the output keeps the input's shape.
  return jnp.take(padded, jnp.arange(x.shape[axis]), axis=axis)
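# Worked example: shift_right(jnp.array([[5, 6, 7]])) == [[0, 5, 6]] -- a 0 is
# prepended and the last token dropped, so the shape is unchanged (used to
# feed shifted targets as decoder inputs for teacher forcing).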
class EncoderDecoderBlock(nn.Module):
"""Transformer encoder-decoder layer."""
mlp_dim: int
num_heads: int
dropout_rate: float = 0.
decode: bool = False
@nn.compact
def __call__(self, targets, encoded, decoder_mask=None, deterministic=True):
"""Applies EncoderDecoder1DBlock module.
Args:
targets: target text embeddings [B, L, E].
encoded: encoded image patches from encoder [B, P, E].
decoder_mask: decoder self-attention mask.
deterministic: bool, deterministic or not (to apply dropout).
Returns:
output after transformer encoder-decoder block [B, L, E].
"""
# Decoder block.
x = nn.LayerNorm(name="LayerNorm1")(targets)
x = nn.SelfAttention(
num_heads=self.num_heads, use_bias=False, broadcast_dropout=False,
dropout_rate=self.dropout_rate, decode=self.decode, name="SelfAttn")(
x, decoder_mask, deterministic=deterministic)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
x = x + targets
# Encoder-Decoder block.
y = nn.LayerNorm(name="LayerNorm2")(x)
y = nn.MultiHeadDotProductAttention(
num_heads=self.num_heads, use_bias=False, broadcast_dropout=False,
dropout_rate=self.dropout_rate, name="CrossAttn")(
y, encoded, deterministic=deterministic)
y = nn.Dropout(rate=self.dropout_rate)(y, deterministic=deterministic)
y = y + x
# MLP block.
z = nn.LayerNorm(name="LayerNorm3")(y)
z = vit.MlpBlock(mlp_dim=self.mlp_dim, dropout=self.dropout_rate,
name="MLP")(z, deterministic=deterministic)
return y + z
class Decoder(nn.Module):
"""Transformer Model Decoder for sequence to sequence translation."""
emb_dim: int
mlp_dim: int
num_heads: int
num_layers: int
dropout_rate: float = 0.
output_vocab_size: int = 32000
zero_decoder_seq: bool = False
@nn.compact
def __call__(self,
encoded,
targets,
pos_emb,
decoder_mask=None,
decode=False,
deterministic=True,
max_decode_length=None):
"""Applies Transformer model on the inputs.
Args:
encoded: encoded image patches from encoder [B, P, E].
targets: target text tokens [B, L].
pos_emb: positional embeddings.
decoder_mask: decoder self-attention mask.
decode: bool, whether to perform fast autoregressive decoding with cache.
deterministic: bool, deterministic or not (to apply dropout).
max_decode_length: optional max length for positional embeddings.
Returns:
output of a transformer decoder [B, L, V].
"""
y = targets.astype("int32")
if not decode:
y = shift_right(y)
y = nn.Embed(self.output_vocab_size, self.emb_dim, name="EmbedTargets",
embedding_init=nn.initializers.normal(stddev=1.0))(y)
if self.zero_decoder_seq:
y = jnp.zeros_like(y)
y = common.AddPositionEmbs(
decode=decode, name="PosEmbedTargets")(y, pos_emb)
y = nn.Dropout(rate=self.dropout_rate)(y, deterministic=deterministic)
for lyr in range(self.num_layers):
y = EncoderDecoderBlock(
num_heads=self.num_heads, mlp_dim=self.mlp_dim,
dropout_rate=self.dropout_rate, decode=decode,
name=f"EncDecBlock{lyr}")(y, encoded, decoder_mask=decoder_mask,
deterministic=deterministic)
y = nn.LayerNorm(name="LayerNorm")(y)
logits = nn.Dense(self.output_vocab_size, kernel_init=nn.initializers.zeros,
name="LogitsDense")(y)
return logits
class Model(nn.Module):
"""Transformer Model for sequence to sequence translation."""
patches: ml_collections.ConfigDict
# Encoder/decoder shared params:
num_heads: int = 8
num_layers: int = 6
mlp_dim: int = 2048
dropout_rate: float = 0.
# Decoder params:
emb_dim: int = 512
vocab_size: int = 32000
seq_len: int = 256
# Encoder params:
input_size: Sequence[int] = (256, 256)
posemb_type: str = "sincos2d" # Can also be "learn"
zero_decoder_seq: bool = False
def setup(self):
grid_size = np.array(self.input_size) // np.array(self.patches.size)
self.pos_emb_for_encoder = vit.get_posemb(
self, self.posemb_type, grid_size, self.emb_dim,
"pos_embedding_encoder")
self.pos_emb_for_decoder = vit.get_posemb(
self, self.posemb_type, (1, self.seq_len), self.emb_dim,
"pos_embedding_decoder")
self.encoder = vit.Encoder(
depth=self.num_layers,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout=self.dropout_rate)
self.decoder = Decoder(
num_layers=self.num_layers,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
emb_dim=self.emb_dim,
output_vocab_size=self.vocab_size,
zero_decoder_seq=self.zero_decoder_seq,
)
self.conv = nn.Conv(self.emb_dim, self.patches.size, padding="VALID",
strides=self.patches.size, name="EmbedPatches")
def encode(self, image, train=False):
"""Encodes input image or embeddings."""
emb = self.conv(image)
patch_embeddings = einops.rearrange(emb, "B PH PW E -> B (PH PW) E")
encoded, _ = self.encoder(
patch_embeddings + self.pos_emb_for_encoder, deterministic=not train)
return encoded
def decode(self, encoded, targets, decode=False, train=False,
max_decode_length=None):
"""Applies Transformer decoder-branch on encoded-input and target.
Args:
encoded: encoded image patches from encoder [B, P, E].
targets: target text tokens [B, L].
decode: whether to prepare and use an autoregressive cache.
train: whether it is training.
max_decode_length: optional max length for positional embeddings.
Returns:
logits array from transformer decoder [B, L, V].
"""
decoder_mask = None if decode else nn.make_causal_mask(targets)
logits = self.decoder(
encoded,
targets,
pos_emb=self.pos_emb_for_decoder,
decoder_mask=decoder_mask,
decode=decode,
deterministic=not train,
max_decode_length=max_decode_length)
return logits
def __call__(self, image, text, *, decode=False, train=False):
"""Applies Transformer model on the inputs.
Args:
image: batch of images [B, H, W, 3].
text: batch of tokenized texts [B, L].
decode: whether to prepare and use an autoregressive cache.
train: whether it is training.
Returns:
logits array from full transformer [B, L, V].
"""
encoded = self.encode(image, train=train)
return self.decode(encoded, text, decode=decode, train=train)
def load(init_params, init_files, model_params=None,
dont_load=("head/kernel", "head/bias", "cls")):
"""Loads params from init checkpoint and merges into init_params."""
del model_params
if isinstance(init_files, str):
# A shortcut for a single file checkpoint of a vtt model.
ckpt_params = utils.load_params(None, init_files)
ckpt_params = flax.training.checkpoints.convert_pre_linen(ckpt_params)
if init_params is not None:
ckpt_params = common.merge_params(ckpt_params, init_params, dont_load)
else:
init_files = {**init_files} # Shallow copy because we'll pop stuff off.
enc_init = init_files.pop("encoder", None)
if enc_init:
ckpt_params = init_params.copy()
vit_params = {
"pos_embedding": ckpt_params["pos_embedding_encoder"],
"Transformer": ckpt_params["encoder"],
"embedding": ckpt_params["EmbedPatches"],
}
encoder_params = vit.load(
vit_params, enc_init, model_cfg={},
dont_load=dont_load)
ckpt_params["encoder"] = encoder_params["Transformer"]
ckpt_params["pos_embedding_encoder"] = encoder_params["pos_embedding"]
ckpt_params["EmbedPatches"] = encoder_params["embedding"]
else:
raise ValueError("Only encoder init is supported: {}.".format(init_files))
return ckpt_params
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VQ-VAE autoencoder with ViT backbone."""
import functools
from typing import Mapping, Optional, Sequence, Union
from big_vision import utils
from big_vision.models import common
from big_vision.models import vit
import einops
import flax.linen as nn
import flax.training.checkpoints
import jax
import jax.numpy as jnp
import numpy as np
partial = functools.partial
# Multiplicative perturbation applied to codewords when doing the split.
# Note that the multiplicative perturbation is not perfectly symmetric, so
# repeated applications can shrink the embedding. However, in practice this
# does not matter for the value we use.
PERTURB = 0.001
# The function below takes a vector `x` and a dictionary of vectors `e` as
# input. It returns a "quantized" version of x (namely, the vector from `e`
# closest to `x`) as well as its index in `e`.
# On top of this, it has two extra features:
#   1. Double `vmap` vectorizes this function to operate on many `x` vectors.
#      More concretely, we add two extra dimensions (batch and space) to `x`.
#      Also note we compute the Euclidean distance in a decomposed way, because
#      this makes it more efficient for vmapping.
#   2. `quantize` is a "discrete" operation, so it does not have a gradient for
#      `x`. So we implement a so-called "straight-through" gradient estimator
#      using `stop_gradient` magic. It does not affect the forward pass, but
#      changes the gradient.
@partial(jax.vmap, in_axes=(0, None), out_axes=(0, 0))
@partial(jax.vmap, in_axes=(0, None), out_axes=(0, 0))
def quantize(x, e):
dist = jnp.sum(x * x)[None] - 2 * x.dot(e.T) + jnp.sum(e * e, axis=1)
idx = jnp.argmin(dist)
x_q = jax.lax.stop_gradient(e[idx] - x) + x # just `e[idx]` for the fwd pass.
return x_q, idx
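# Sketch of the identities used above (not executed): the decomposed distance
# is ||x - e_i||^2 = <x, x> - 2 <x, e_i> + <e_i, e_i>, and the straight-through
# line evaluates to e[idx] in the forward pass while d(x_q)/d(x) == identity,
# because stop_gradient(e[idx] - x) is treated as a constant under autodiff.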
def split_the_most_frequent_embedding(state):
"""Splits most frequent embedding into two and eliminates least frequent.
Args:
    state: a dict that contains the current jax rng, embeddings and their
      counts.
  Returns:
    A new dict with the updated jax rng, embeddings and counts.
"""
rng, e, c = state["rng"], state["dictionary"], state["counts"]
rng, rng_local = jax.random.split(rng)
i_max = jnp.argmax(c)
i_min = jnp.argmin(c)
e = e.at[i_min].set(
e[i_max] * jax.random.uniform(rng_local, (e.shape[1],), jnp.float32,
1.0-PERTURB, 1.0+PERTURB))
c = c.at[i_min].set(c[i_max] / 2.0)
c = c.at[i_max].set(c[i_max] / 2.0)
e = e.at[i_min].set(e[i_min] / 2.0)
e = e.at[i_max].set(e[i_max] / 2.0)
return {"rng": rng, "dictionary": e, "counts": c}
class Model(nn.Module):
"""ViT model."""
inputs: Mapping[str, Sequence[int]]
outputs: Mapping[str, Sequence[int]]
input_size: Sequence[int] = (256, 256)
patch_size: Sequence[int] = (8, 8)
code_len: int = 256
width: int = 768
enc_depth: int = 6
dec_depth: int = 6
mlp_dim: Optional[int] = None
num_heads: int = 12
posemb: str = "learn" # Can also be "sincos2d"
rep_size: Union[int, bool] = False
dropout: float = 0.0
reinit: Optional[Sequence[str]] = None
head_zeroinit: bool = True
dict_size: int = 512 # Number of words in dict.
codeword_dim: Optional[int] = None
dict_momentum: float = 0.995 # Exp. moving average coeff. for dict. learning.
quantize: bool = True
# Useful to set to None when running without pmap, e.g. testing.
statistics_axis_name: str = "batch"
# Threshold for the discounted count after which the codeword will be
# considered unused. For the `dict_momentum` param of 0.995 the codeword
# should not be present in ~500 batches in a row.
min_count: float = 0.1 # ~= 0.995 ** 500
with_encoder_ctx: bool = False
with_decoder_ctx: bool = False
code_dropout: str = "none"
bottleneck_resize: bool = False
zero_decoder_seq: bool = False
def setup(self):
self.grid_size = np.array(self.input_size) // np.array(self.patch_size)
self.embeddings = {
k: nn.DenseGeneral(features=(self.width,), axis=range(-len(shape), 0),
name=f"embedding_{k}")
for k, shape in self.inputs.items()
}
kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {}
self.heads = {
k: nn.DenseGeneral(features=shape, name=f"head_{k}", **kw)
for k, shape in self.outputs.items()
}
if self.with_encoder_ctx:
self.stem_conv_ctx_enc = nn.Conv(
self.width, self.patch_size, strides=self.patch_size,
padding="VALID", name="ctx_enc_embedding")
if self.with_decoder_ctx:
self.stem_conv_ctx_dec = nn.Conv(
self.width, self.patch_size, strides=self.patch_size,
padding="VALID", name="ctx_dec_embedding")
self.pos_embedding_encoder = vit.get_posemb(
self, self.posemb, self.grid_size, self.width, "pos_embedding_encoder")
self.encoder = vit.Encoder(
depth=self.enc_depth,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout=self.dropout,
name="encoder")
if not self.bottleneck_resize:
self.bottleneck_downsample = self.param(
"bottleneck_downsample",
nn.initializers.xavier_uniform(),
(np.prod(self.grid_size), self.code_len))
norm_init = nn.initializers.normal(stddev=1.0 / np.sqrt(self.dict_size))
self.dictionary = self.variable(
"state", "dictionary",
lambda shape: norm_init(self.make_rng("state"), shape),
(self.dict_size, self.codeword_dim or self.width))
self.counts = self.variable("state", "counts", jnp.ones, (self.dict_size,))
if not self.bottleneck_resize:
self.bottleneck_upsample = self.param(
"bottleneck_upsample",
nn.initializers.xavier_uniform(),
(self.code_len, np.prod(self.grid_size)))
self.pos_embedding_decoder = vit.get_posemb(
self, self.posemb, self.grid_size, self.width, "pos_embedding_decoder")
self.decoder = vit.Encoder(
depth=self.dec_depth,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout=self.dropout,
name="decoder")
self.encoder_head = nn.Dense(self.codeword_dim or self.width)
self.decoder_stem = nn.Dense(self.width)
def get_codewords(self):
e = self.dictionary.value / self.counts.value[:, None]
e = e / jnp.linalg.norm(e, axis=-1, keepdims=True)
return e
def encode(self, x, *, ctx=None, train=False, update_dict=True):
out = {}
out["stem"] = {}
for key, embed in self.embeddings.items():
out["stem"][key] = embed(x[key])
x = sum(out["stem"].values())
if self.with_encoder_ctx:
ctx_tokens = self.stem_conv_ctx_enc(ctx)
ctx_tokens = einops.rearrange(ctx_tokens, "b h w c -> b (h w) c")
x = x + ctx_tokens
x, _ = self.encoder(x + self.pos_embedding_encoder, deterministic=not train)
if self.bottleneck_resize:
x = einops.rearrange(x, "b (h w) c -> b h w c",
h=self.grid_size[0], w=self.grid_size[1])
l = int(np.round(self.code_len ** 0.5))
x = jax.image.resize(
x, (x.shape[0], l, l, x.shape[3]),
method="linear")
x = einops.rearrange(x, "b h w c -> b (h w) c")
else:
x = jnp.einsum("btc,tn->bnc", x, self.bottleneck_downsample)
x = self.encoder_head(x)
x = jax.nn.standardize(x, axis=-1)
x_pre_q = out["bottleneck"] = x
e = self.get_codewords()
x, idx = quantize(x, e)
out["bottleneck_q"] = x
out["code"] = idx
    # Implements the explicit dictionary learning algo outlined in the VQ-VAE
    # paper. We slightly deviate from the paper's formulation, as we find it
    # confusing, especially in the multi-host scenario. What is implemented
    # below can be seen as computing discounted counts and sums of all
    # embeddings.
if train:
# Compute counts and sum(x) of code in the global batch.
counts = jnp.zeros(self.dict_size, dtype=jnp.int32)
counts = counts.at[idx].add(1)
# Below we introduce redundant stop_gradient, because jax' dead code
# elimination for our program's gradient fails to infer that the code
# below does not require gradient computation.
# Relevant github issue: https://github.com/google/jax/issues/9042.
# TODO: remove stop_gradient when the bug is fixed.
x_sum = jnp.zeros_like(self.dictionary.value)
x_sum = x_sum.at[idx].add(jax.lax.stop_gradient(x_pre_q))
if self.statistics_axis_name:
counts = jax.lax.psum(counts, axis_name=self.statistics_axis_name)
x_sum = jax.lax.psum(x_sum, axis_name=self.statistics_axis_name)
out["codebook_max_ratio"] = jnp.max(counts) / jnp.sum(counts)
out["codebook_zeros_ratio"] = jnp.sum(counts == 0) / len(counts)
if update_dict:
self.counts.value = self.counts.value * self.dict_momentum + counts
self.dictionary.value = (self.dictionary.value * self.dict_momentum +
x_sum)
state = {"dictionary": self.dictionary.value,
"counts": self.counts.value,
"rng": self.make_rng("vqvae")}
new_state = jax.lax.while_loop(
lambda state: jnp.any(state["counts"] < self.min_count),
split_the_most_frequent_embedding,
state)
self.counts.value = new_state["counts"]
self.dictionary.value = new_state["dictionary"]
if not self.quantize:
x = x_pre_q
out["bottleneck_q"] = x
return x, out
def decode(self, x, ctx=None, discrete_input=False, train=False):
out = {}
if discrete_input:
e = self.get_codewords()
x = e[x]
if self.zero_decoder_seq:
x = jnp.zeros_like(x)
if train and self.code_dropout != "none":
importance = jnp.linspace(1.0, 0.0, self.code_len + 2)[1:-1]
thr = jax.random.uniform(self.make_rng("dropout"), x.shape[:1])
mask = importance[None, :] > thr[:, None]
if self.code_dropout == "random":
mask = jax.random.permutation(
self.make_rng("dropout"), mask, axis=-1, independent=True)
x = x * mask[:, :, None]
x = self.decoder_stem(x)
if self.bottleneck_resize:
l = int(np.round(self.code_len ** 0.5))
x = einops.rearrange(x, "b (h w) c -> b h w c", h=l, w=l)
x = jax.image.resize(
x, (x.shape[0], self.grid_size[0], self.grid_size[1], x.shape[3]),
method="linear")
x = einops.rearrange(x, "b h w c -> b (h w) c")
else:
x = jnp.einsum("bnc,nt->btc", x, self.bottleneck_upsample)
if self.with_decoder_ctx:
ctx_tokens = self.stem_conv_ctx_dec(ctx)
ctx_tokens = einops.rearrange(ctx_tokens, "b h w c -> b (h w) c")
x = x + ctx_tokens
x, _ = self.decoder(x + self.pos_embedding_decoder)
out["logits"] = {}
for key, head in self.heads.items():
out["logits"][key] = head(x)
return out["logits"], out
def __call__(self, x, *, ctx=None, train=False, update_dict=True):
x, out_enc = self.encode(x, ctx=ctx, train=train, update_dict=update_dict)
x, out_dec = self.decode(x, ctx=ctx, train=train)
return x, {**out_enc, **out_dec}
def load(init_params, init_file, model_params=None, dont_load=()):
"""Loads params from init checkpoint and merges into init_params."""
del model_params
ckpt = flax.core.unfreeze(utils.load_checkpoint(None, init_file))
params = {"params": ckpt["params"], "state": ckpt["state"]}
params = flax.training.checkpoints.convert_pre_linen(params)
# Fix old-style param name.
if "Encoder" in params["params"]:
p = params["params"]
p["encoder"] = p.pop("Encoder")
p["decoder"] = p.pop("Decoder")
params["params"] = p
if init_params is not None:
params = common.merge_params(params, init_params, dont_load)
return params["params"], params["state"]
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vision-text-transformer."""
from absl.testing import absltest
from big_vision.models.proj.uvim import vtt
import jax
import jax.numpy as jnp
import ml_collections
class VTTTest(absltest.TestCase):
def test_vtt_with_1_step(self):
model_config = ml_collections.ConfigDict(dict(
input_size=(224, 224),
patches={"size": (16, 16)},
num_heads=2,
num_layers=2,
mlp_dim=128,
emb_dim=64,
vocab_size=500))
batch_size, max_len = 8, 50
image = jnp.ones((batch_size, 224, 224, 3))
text = jnp.ones((batch_size, max_len), dtype=jnp.int32)
m = vtt.Model(**model_config)
variables = m.init(jax.random.PRNGKey(42), image, text)
self.assertCountEqual(variables.keys(), ["params"])
params = variables["params"]
out = m.apply({"params": params}, image, text)
expected_shape = (batch_size, max_len, model_config.vocab_size)
self.assertEqual(out.shape, expected_shape)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference."""
import functools
from typing import Any, Callable, Optional, Tuple
import flax
from flax import linen as nn
import jax
from jax import lax
from jax import numpy as jnp
import numpy as np
EOS_ID = 1
NEG_INF = np.array(-1.0e7) # Effective negative infinity.
GenerateFn = Callable[...,
Tuple[jnp.ndarray, jnp.ndarray, Optional[jnp.ndarray]]]
def temperature_sampling(*args, temperature=1.0, top_k=0, top_p=0.0, **kwargs):
"""Convenience wrapper for temperature sampling."""
return generate(*args, generate_fn=_temperature_sampling,
temperature=temperature,
top_k=top_k,
top_p=top_p,
**kwargs)
def topk_sampling(*args, temperature=1.0, top_k=20, **kwargs):
"""Convenience wrapper for top-k sampling."""
return generate(*args, generate_fn=_temperature_sampling,
temperature=temperature,
top_k=top_k,
top_p=0.0,
**kwargs)
def nucleus_sampling(*args, temperature=1.0, top_p=0.2, **kwargs):
"""Convenience wrapper for nucleus sampling."""
return generate(*args, generate_fn=_temperature_sampling,
temperature=temperature,
top_k=0,
top_p=top_p,
**kwargs)
def argmax_sampling(*args, **kwargs):
"""Convenience wrapper for argmax sampling."""
return generate(*args, generate_fn=_temperature_sampling,
temperature=1e-7,
top_k=0,
top_p=0.0,
**kwargs)
def generate(params, inputs, prompts, seed, *,
model: nn.Module,
generate_fn: GenerateFn,
num_samples: int = 1,
prefill: bool = False,
eos_token: int = EOS_ID,
**generate_fn_kwargs):
"""Generate sequence with fast decoding beam search on a batch.
Model must support:
encode(inputs) -> encoded, or encode(*inputs) -> encoded.
decode(encoded, prompts, decode=True/False, max_decode_length) -> logits
Args:
params: model parameters.
inputs: either a single `jnp.ndarray` of e.g. images, or
a tuple of inputs which are passed via `model.encode(*inputs)`.
    prompts: [batch_size, max_decode_len] forced tokens for generation.
      Prompts must end with the 0 token and must not contain end markers.
      If no prompting is required, pass an all-zeros tensor.
    seed: PRNG key for random sampling.
    model: object with methods encode and decode.
    generate_fn: search or sampling function to generate sequences.
    num_samples: number of samples to generate per item.
    prefill: whether to prefill the cache.
    eos_token: id of the end-of-sentence token in the target vocabulary.
    **generate_fn_kwargs: generate fn specific kwargs.
  Returns:
    Top-scoring sequences (worst scores first)
      [batch_size, num_samples, max_decode_len].
    Scores of the generated sequences (worst scores first). The returned
      scores are modified log probabilities; may be absent
      [batch_size, num_samples].
    Log probs for the generated tokens; may be absent
      [batch_size, num_samples, max_decode_len].
"""
_, max_decode_len = prompts.shape
decode_kwargs = {"max_decode_length": max_decode_len}
def encode(model, inputs):
if not isinstance(inputs, tuple):
inputs = (inputs,)
return model.encode(*inputs)
encoded_inputs = nn.apply(encode, model)(params, inputs)
if isinstance(encoded_inputs, tuple):
encoded_inputs, enc_pos_emb = encoded_inputs
decode_kwargs["enc_pos_emb"] = enc_pos_emb
def init_cache(model):
encoded = jnp.zeros_like(encoded_inputs)
targets = jnp.zeros_like(prompts)
return model.decode(encoded, targets, decode=True, **decode_kwargs)
cache = nn.apply(init_cache, model, mutable=True)(params)[1]["cache"]
def prefill_cache(model, encoded, targets):
return model.decode(encoded, targets, prefill=True, **decode_kwargs)
if prefill:
cache = nn.apply(prefill_cache, model, mutable=True)(
{"params": params["params"], "cache": cache},
encoded_inputs, prompts)[1]["cache"]
def tokens_to_logits(tokens, cache):
def decode_step(model, tokens):
encoded = expand_samples_dim_and_flatten(
encoded_inputs, num_samples)
return model.decode(encoded, tokens, decode=True, **decode_kwargs)
logits, aux = nn.apply(decode_step, model, mutable=True)(
{"params": params["params"], "cache": cache}, tokens)
return logits.squeeze(axis=1), aux["cache"]
beam_seqs, scores, logprobs = generate_fn(
prompts,
cache,
tokens_to_logits,
num_samples=num_samples,
eos_token=eos_token,
max_decode_len=max_decode_len,
seed=seed,
**generate_fn_kwargs)
return beam_seqs, scores, logprobs
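# Usage sketch (hypothetical `model`, `params` and `images`; assumes an
# `import jax` alongside this module's imports): draw 4 samples per image with
# nucleus sampling; all-zero prompts mean unconditional generation.
#
#   prompts = jnp.zeros((batch_size, max_decode_len), dtype=jnp.int32)
#   seqs, scores, logprobs = nucleus_sampling(
#       params, images, prompts, seed=jax.random.PRNGKey(0),
#       model=model, num_samples=4, top_p=0.9)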
def expand_samples_dim(x, num_samples):
"""Creates new dimension in non-scalar array and tiles into it."""
if x.ndim == 0: # ignore scalars (e.g. cache index)
return x
x = jnp.expand_dims(x, axis=1)
tile_dims = [1] * x.ndim
tile_dims[1] = num_samples
return jnp.tile(x, tile_dims)
def flatten_samples_dim(x):
"""Flattens samples dim into batch dim."""
if x.ndim == 0: # ignore scalars (e.g. cache index)
return x
return x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
def unflatten_samples_dim(x, batch_size, num_samples):
"""Unflattens first dim into batch and samples dims."""
if x.ndim == 0: # ignore scalars (e.g. cache index)
return x
assert batch_size * num_samples == x.shape[0]
return x.reshape((batch_size, num_samples) + x.shape[1:])
def expand_samples_dim_and_flatten(x, num_samples):
"""Expands the each batch item by num_samples in batch dimension."""
return flatten_samples_dim(expand_samples_dim(x, num_samples))
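# Shape example: an array of shape [B, P, E] becomes [B, N, P, E] after
# expand_samples_dim and [B * N, P, E] after flattening, so each batch item is
# repeated num_samples times along the leading dimension.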
def cache_map(fn, cache):
"""Maps function over caches, even multiple caches in various layers."""
frozen = isinstance(cache, flax.core.FrozenDict)
if frozen:
cache = flax.core.unfreeze(cache)
flat_cache = flax.traverse_util.flatten_dict(cache)
# Exclude cached relative position bias from beam expansion, etc.
keyvals = {k: v for k, v in flat_cache.items() if k[-1] != "cached_bias"}
keyvals = jax.tree_map(fn, keyvals)
flat_cache.update(keyvals)
new_cache = flax.traverse_util.unflatten_dict(flat_cache)
if frozen:
new_cache = flax.core.freeze(new_cache)
return new_cache
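# Usage sketch: the expression below is how `_init_state` tiles every cache
# entry (except scalars and cached relative-position biases) to B * N samples.
#
#   cache = cache_map(
#       lambda x: expand_samples_dim_and_flatten(x, num_samples), cache)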
@flax.struct.dataclass
class LoopState:
"""Internal state of the temperature sampling loop."""
# Position in the sequence that we are currently looking at.
cur_index: int
# Cache for fast auto-regressive decoding.
cache: Any
# Flags indicating whether the sequence reached eos [B*N].
flags_finished: jnp.ndarray
# Sequences being generated [B*N, L+1]. Note: sequences start with 0 token.
sequences: jnp.ndarray
scores: jnp.array # Total sequence scores per batch element [B*N].
logprobs: jnp.array # Logprobs of selected tokens [B*N, L].
rng: jnp.ndarray # PRNGKey of the loop state.
def _init_state(prompts, cache, init_rng_key, num_samples):
batch_size, max_decode_len_plus_one = prompts.shape
# Add extra samples dim to attention cache pytree elements.
cache = cache_map(
lambda x: expand_samples_dim_and_flatten(x, num_samples), cache)
return LoopState(
cur_index=0,
cache=cache,
flags_finished=jnp.zeros((batch_size*num_samples), dtype=jnp.bool_),
sequences=expand_samples_dim_and_flatten(prompts, num_samples),
scores=jnp.zeros((batch_size*num_samples)),
logprobs=jnp.zeros((batch_size*num_samples, max_decode_len_plus_one-1)),
rng=init_rng_key)
def _should_temperature_sampling_continue(state, max_decode_len):
"""Check if we should continue or not."""
max_length_not_reached = state.cur_index < max_decode_len - 1
all_seqs_finished = jnp.all(state.flags_finished)
return max_length_not_reached & (~all_seqs_finished)
def _temperature_sampling_iteration(state, tokens_to_logits, temperature, eos,
top_k, top_p, mask_token_ids=()):
"""Temperature sampling step function."""
rng_sampling, rng = jax.random.split(state.rng)
# 1. Use the model to generate a distribution over the vocabulary (for the
# next token) and sample from it, optionally applying the temperature.
# --> [B,].
cur_tokens = state.sequences[:, state.cur_index]
logits, new_cache = tokens_to_logits(cur_tokens[:, None], state.cache)
assert logits.ndim == 2, ("tokens_to_logits expected to return a "
f"2-dimensional array [B, V], got {logits.ndim} "
"dimensions.")
logprobs = jax.nn.log_softmax(logits)
# Do not sample special tokens with ids in mask_token_ids.
if mask_token_ids:
probs = jax.nn.softmax(logits)
for i in mask_token_ids:
probs = probs.at[:, i].set(0.)
probs = probs / jnp.sum(probs, -1, keepdims=True)
logits = jnp.log(probs)
if top_p: # Nucleus sampling.
logits_sorted = jnp.sort(logits, axis=-1)[:, ::-1]
sorted_cum_probs = jnp.cumsum(
jax.nn.softmax(logits_sorted, axis=-1), axis=-1)
cutoff_index = jnp.sum(sorted_cum_probs < top_p, axis=-1, keepdims=True)
cutoff_logit = jnp.take_along_axis(logits_sorted, cutoff_index, axis=-1)
logits = jnp.where(logits < cutoff_logit,
jnp.full_like(logits, NEG_INF), logits)
if top_k:
topk_logits, topk_indices = jax.lax.top_k(logits, top_k)
topk_token = jax.random.categorical(rng_sampling, topk_logits / temperature)
sampled_tokens = jnp.squeeze(
jnp.take_along_axis(topk_indices, jnp.expand_dims(topk_token, -1),
axis=-1), axis=-1)
else:
sampled_tokens = jax.random.categorical(rng_sampling, logits / temperature)
sampled_logprobs = jnp.squeeze(jnp.take_along_axis(
logprobs, jnp.expand_dims(sampled_tokens, axis=1), axis=-1), axis=-1)
# 2. Use the sampled tokens to update the sequences that did not finish yet,
# but only if they are out of prompt.
next_tokens = state.sequences[:, state.cur_index + 1]
next_logprobs = jnp.squeeze(jnp.take_along_axis(
logprobs, jnp.expand_dims(next_tokens, axis=1), axis=-1), axis=-1)
out_of_prompt = next_tokens == 0
update_pos = out_of_prompt * (~state.flags_finished)
next_tokens = sampled_tokens * update_pos + next_tokens * (~update_pos)
sampled_logprobs = update_pos*sampled_logprobs + ~update_pos*next_logprobs
sequences = state.sequences.at[:, state.cur_index + 1].set(next_tokens)
scores = state.scores + sampled_logprobs
seqs_logprobs = state.logprobs.at[:, state.cur_index].set(sampled_logprobs)
# 3. Update the finished flags. Only out-of-prompt seqs can finish.
flags_finished = out_of_prompt & (state.flags_finished |
(sampled_tokens == eos))
return LoopState(
cur_index=state.cur_index+1,
cache=new_cache,
flags_finished=flags_finished,
sequences=sequences,
scores=scores,
logprobs=seqs_logprobs,
rng=rng)
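# Illustrative check (added; not part of the original file): the nucleus
# (top-p) cutoff above keeps the smallest set of tokens whose cumulative
# probability reaches top_p. With probs [0.5, 0.3, 0.15, 0.05] and top_p=0.7,
# the cutoff index is 1, so only the two most likely tokens survive.
_demo_logits = jnp.log(jnp.array([[0.5, 0.3, 0.15, 0.05]]))
_demo_sorted = jnp.sort(_demo_logits, axis=-1)[:, ::-1]
_demo_cum = jnp.cumsum(jax.nn.softmax(_demo_sorted, axis=-1), axis=-1)
assert int(jnp.sum(_demo_cum < 0.7)) == 1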
def _temperature_sampling(prompts, cache, tokens_to_logits, num_samples=1,
eos_token=EOS_ID, max_decode_len=None,
seed=0, temperature=1., top_k=0, top_p=0.0,
mask_token_ids=()):
"""Temperature sampling.
Purely stochastic, sampling-based procedure to generate sequences. Every
next token in the sequence is sampled from the discrete vocab distribution
produced by the auto-regressive sequence model. Optionally, the distribution
can be adjusted with a temperature before sampling from it. Generated
sequences are no longer than max_decode_len.
Args:
prompts: optional prompts [B, L]. By default (None), we perform free-form
generation without any prompts. Prompt sequences should finish with
trailing zeros and should not contain eos tokens.
cache: cache for fast decoding (generation).
tokens_to_logits: fast autoregressive decoder function taking single token
slices and cache and returning next-token logits and updated cache.
num_samples: int: number of samples to generate per batch item. Note: no
deduplication is performed; depending on parameter settings, identical
sequences may be generated and returned.
eos_token: end-of-sentence token.
max_decode_len: maximal length of generated sequences (L).
seed: PRNGKey for random sampling.
temperature: positive real-valued sampling temperature. By default we sample
from the original distribution. As the temperature approaches 0., the
entire distribution concentrates on the most probable outcome(s).
top_k: limit sampling to only top-k logits. Zero means no limit.
top_p: limit sampling to smallest number of top logits with max cumulative
prob <= top_p. Zero means no limit. Cannot use both top_p and top_k.
mask_token_ids: if set then tokens with given ids are not sampled.
Returns:
sequences: generated sequences [B, num_samples, L].
scores: cumulative log-probabilities of the sampled tokens [B, num_samples].
logprobs: Log probabilities for the generated tokens [B, num_samples, L].
"""
if top_k > 0 and top_p > 0.0:
raise ValueError(f"Cannot use both top_k {top_k} and top_p {top_p}.")
if max_decode_len is None:
max_decode_len = prompts.shape[1]
# We will start generating sequences from 0 token.
prompts = jnp.pad(prompts, ((0, 0), (1, 0)))
eos = jnp.array(eos_token)
if isinstance(seed, int):
seed = jax.random.PRNGKey(seed)
# Initialize the state.
loop_init_state = _init_state(prompts, cache, seed, num_samples)
should_temperature_sampling_continue_fn = functools.partial(
_should_temperature_sampling_continue,
max_decode_len=max_decode_len+1) # Account for prompt padding with 0's.
temperature_sampling_iteration_fn = functools.partial(
_temperature_sampling_iteration,
tokens_to_logits=tokens_to_logits,
temperature=temperature, top_k=top_k, top_p=top_p,
eos=eos, mask_token_ids=mask_token_ids)
# Run the temperature sampling and generate the sequences.
final_state = lax.while_loop(
should_temperature_sampling_continue_fn,
temperature_sampling_iteration_fn,
loop_init_state)
# Return the generated sequences, discarding the 0 token in the beginning.
return (
final_state.sequences[:, 1:].reshape((-1, num_samples, max_decode_len)),
final_state.scores.reshape((-1, num_samples)),
final_state.logprobs.reshape((-1, num_samples, max_decode_len)))
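# Minimal usage sketch (added; not part of the original file): a dummy
# tokens_to_logits that always prefers EOS, a trivial (empty) cache, and
# free-form generation (all-zero prompts). Shapes follow the docstring above.
def _demo_tokens_to_logits(tokens, cache):
  del tokens  # A real decoder would condition on these.
  logits = jnp.full((2, 5), NEG_INF).at[:, EOS_ID].set(0.0)
  return logits, cache
_demo_seqs, _demo_scores, _demo_logprobs = _temperature_sampling(
    prompts=jnp.zeros((2, 4), jnp.int32),
    cache={},
    tokens_to_logits=_demo_tokens_to_logits)
assert _demo_seqs.shape == (2, 1, 4)    # [B, num_samples, L]
assert _demo_scores.shape == (2, 1)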
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoders both for text and for images."""
import importlib
from typing import Any, Optional, Tuple, Union
from absl import logging
from big_vision import utils
import flax.linen as nn
import jax.numpy as jnp
ConfigDict = Any
class Model(nn.Module):
"""Two towers transformer."""
image: Optional[ConfigDict] = None
text: Optional[ConfigDict] = None
text_model: str = "proj.image_text.text_transformer"
image_model: str = "vit"
out_dim: Union[int, Tuple[int, int]] = 128
temperature_init: float = 1.0
bias_init: Optional[float] = None
@nn.compact
def __call__(self, image, text=None, **kw):
"""Returns (B,C) image and (B,C) text representations."""
# Support calling without text or without image, for example for few-shot.
ztxt, zimg = None, None
out = {}
out_dims = self.out_dim
if isinstance(out_dims, int):
out_dims = (out_dims, out_dims)
# Embed the text:
if text is not None:
text_model = importlib.import_module(
f"big_vision.models.{self.text_model}"
).Model(**{"num_classes": out_dims[1], **(self.text or {})}, name="txt")
ztxt, out_txt = text_model(text, **kw)
for k, v in out_txt.items():
out[f"txt/{k}"] = v
# Normalize the embeddings the models give us.
out["txt/norm"] = jnp.linalg.norm(ztxt, axis=1, keepdims=True)
out["txt/normalized"] = ztxt = ztxt / (out["txt/norm"] + 1e-8)
if image is not None:
image_model = importlib.import_module(
f"big_vision.models.{self.image_model}"
).Model(**{"num_classes": out_dims[0], **(self.image or {})}, name="img") # pylint: disable=not-a-mapping
zimg, out_img = image_model(image, **kw)
for k, v in out_img.items():
out[f"img/{k}"] = v
# Normalize the embeddings the models give us.
out["img/norm"] = jnp.linalg.norm(zimg, axis=1, keepdims=True)
out["img/normalized"] = zimg = zimg / (out["img/norm"] + 1e-8)
temp_init = jnp.log(self.temperature_init)
t = self.param("t",
lambda key, shape, dtype: temp_init * jnp.ones(shape, dtype),
(1,), jnp.float32)
out["t"] = jnp.exp(t)
out["t/parameter"] = t
if (b_init := self.bias_init) is not None:
out["b"] = self.param("b", lambda k, s, d: b_init * jnp.ones(s, d),
(1,), jnp.float32)
# We could actually play with pre-multiplying by temperature here, such
# that out["t"] is nothing special to the trainer anymore.
return zimg, ztxt, out
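# Illustrative helper (added; not part of the original file, and not used by
# the trainer): how the normalized tower outputs, the learned temperature,
# and the optional SigLIP-style bias above are typically combined into
# pairwise contrastive logits.
def _contrastive_logits_sketch(zimg, ztxt, t, b=0.0):
  """zimg, ztxt: (B, C) L2-normalized towers; t: out["t"]; b: out["b"]."""
  return t * jnp.dot(zimg, ztxt.T) + b  # (B, B) similarity logits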
def load(init_params, init_files, model_cfg, img_load_kw={}, txt_load_kw={}): # pylint: disable=dangerous-default-value
"""Loads both towers, `init_files` is now a dict with `img` and `txt` keys."""
if isinstance(init_files, str):
# A shortcut for a single file checkpoint of a two_towers model.
if "bias_init" in model_cfg.keys():
logging.info("loading img, txt, t, and b from a single checkpoint.")
init_files = {k: f"{init_files}:{k}" for k in ("img", "txt", "t", "b")}
else:
logging.info("loading img, txt, and t from a single checkpoint.")
init_files = {k: f"{init_files}:{k}" for k in ("img", "txt", "t")}
else:
init_files = {**init_files} # Shallow copy because we'll pop stuff off.
restored_params = {**init_params}
img_init = init_files.pop("image", init_files.pop("img", None))
if img_init:
restored_params["img"] = importlib.import_module(
f"big_vision.models.{model_cfg.get('image_model', 'vit')}"
).load(init_params["img"], img_init, model_cfg.image, **img_load_kw)
txt_init = init_files.pop("text", init_files.pop("txt", None))
if txt_init:
restored_params["txt"] = importlib.import_module(
f"big_vision.models.{model_cfg.get('text_model', 'proj.image_text.text_transformer')}" # pylint: disable=line-too-long
).load(init_params["txt"], txt_init, model_cfg.text, **txt_load_kw)
t_init = init_files.pop("temperature", init_files.pop("t", None))
if t_init:
restored_params["t"] = utils.load_params(None, t_init)
b_init = init_files.pop("bias", init_files.pop("b", None))
if b_init:
restored_params["b"] = utils.load_params(None, b_init)
assert not init_files, (
f"There's something unused left in `config.model_init`. You probably got "
f"a typo. Here it is: {init_files}")
return restored_params
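# Usage sketch (added; not part of the original file): the single-file
# shortcut above expands, e.g. without `bias_init` in the model config, to
#   load(params, "/path/ckpt.npz", cfg)
#   -> init_files == {"img": "/path/ckpt.npz:img",
#                     "txt": "/path/ckpt.npz:txt",
#                     "t":   "/path/ckpt.npz:t"}
# before each tower's own `load` is called on its entry.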
|
# Copyright 2023 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoders for text, similar to CLIP."""
from typing import Any
from big_vision import utils
from big_vision.models import common
from big_vision.models import vit
import flax.linen as nn
import flax.training.checkpoints
import numpy as np
ConfigDict = Any
class _Model(nn.Module):
"""Text transformer similar to CLIP."""
# Differences to CLIP text encoder (gpt-2) that I am aware of:
# 1. https://imgur.com/HNi3jix (gpt-1)
# 2. https://imgur.com/qKGZgBR (gpt-2)
# 3. https://imgur.com/a/xrpYHF0 (clip)
# - LayerNorm is on res-path (like pre-activation resnet)
# - dropout 0.1 everywhere
# - init as var=0.02, scaled by depth
# - BOS and EOS tokens, take repr from EOS.
# - self-attention is autoregressively masked.
# - scaled in width only, with the image model.
num_classes: int
width: int = 512
depth: int = 12
mlp_dim: int = 2048
num_heads: int = 8
dropout: float = 0.0
vocab_size: int = 32_000
pool_type: str = "last"
@nn.compact
def __call__(self, text, *, train=False):
out = {}
# We can't use where/argwhere since the output shape is not fixed.
# Here we use the fact that sequences are padded with EOS tokens, that the
# EOS token has value 1, and that argmin returns the first index.
# eos_indices = jnp.argmin(text, axis=1)
embedding = nn.Embed(num_embeddings=self.vocab_size, features=self.width)
x = out["embedded"] = embedding(text)
# Add posemb
n, l, d = x.shape # pylint: disable=unused-variable
x = x + self.param("pos_embedding",
nn.initializers.normal(stddev=1/np.sqrt(d)),
(1, l, d), x.dtype)
x, encoder_out = vit.Encoder(
depth=self.depth, mlp_dim=self.mlp_dim, num_heads=self.num_heads,
dropout=self.dropout)(x, deterministic=not train)
out.update({"transformed": x, **encoder_out})
# Share weights between embeddings and logit transformation.
out["vocab_logits"] = embedding.attend(x)
if self.pool_type == "last":
# Assuming "sticky" EOS tokenization, last token is always EOS.
x = out["pre_logits"] = x[:, -1, :]
elif self.pool_type == "first":
x = out["pre_logits"] = x[:, 0, :]
elif self.pool_type in ("mean", "gap"):
x = out["pre_logits"] = x.mean(axis=1)
elif self.pool_type in ("max", "gmp"):
x = out["pre_logits"] = x.max(axis=1)
elif self.pool_type == "map":
x = out["pre_logits"] = vit.MAPHead(
num_heads=self.num_heads, mlp_dim=self.mlp_dim)(x)
else:
raise NotImplementedError(f"Cannot do pooling '{self.pool_type}'")
x = out["logits"] = nn.Dense(self.num_classes, name="head")(x)
return x, out
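# Illustrative note (added; not part of the original file): with "sticky" EOS
# padding and EOS id 1 (see the comment at the top of __call__), a sequence
# like [7, 9, 1, 1, 1] always holds an EOS token at the last position, so the
# "last" pooling above mirrors CLIP's take-representation-from-EOS behavior.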
def Model(num_classes, *, variant=None, **kw): # pylint: disable=invalid-name
"""Factory function, because linen really don't like what I'm doing!"""
return _Model(num_classes, **{**vit.decode_variant(variant), **kw})
def load(init_params, init_file, model_cfg, dont_load=()): # pylint: disable=invalid-name
"""Load init from checkpoint, both old model and this one. +Hi-res posemb."""
del model_cfg # unused
params = utils.load_params(None, init_file)
params = flax.core.unfreeze(
flax.training.checkpoints.convert_pre_linen(params))
# Some older (but expensive to train) checkpoints had the posemb added twice
# by mistake. We detect this here and merge them.
extra_posemb = params["Encoder_0"].pop("pos_embedding", 0)
params["pos_embedding"] += extra_posemb
return common.merge_params(params, init_params, dont_load)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for distillation as in https://arxiv.org/abs/2106.05237.
It works by having a (set of) teacher model(s) defined the same way as the
student in the config and, for now, only distilling logits with one of
several loss functions.
We explored distilling intermediate feature maps, extra data, and other
tricks in depth during two internships in a separate prototype codebase, but
ultimately they proved unnecessary, and are thus not (yet?) implemented in
this codebase.
Thus, for now, there are no extra learnable parameters besides the student's.
This keeps the code relatively simple.
"""
# pylint: disable=consider-using-from-import
import functools
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.evaluators.proj.distill.distance as dd
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
from tensorflow.io import gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def getfirst(d, *keys):
"""Returns the first of `keys` that's present in mapping `d`."""
result, found = None, False
for k in reversed(keys):
if k in d:
result, found = d[k], True
if found:
return result
else:
raise KeyError(f"None of {keys} is in {d.keys()}")
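# Illustrative check (added; not part of the original file): earlier keys take
# precedence because the reversed loop lets them overwrite later matches.
assert getfirst({"image": 1, "img": 2}, "image", "img") == 1
assert getfirst({"img": 2}, "image", "img") == 2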
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info(
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
# The pool is used to perform misc operations, such as logging, in an async way.
pool = multiprocessing.pool.ThreadPool()
# Here we register preprocessing ops from modules listed on `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image", "ops_text"]):
importlib.import_module(f"big_vision.pp.{m}")
# This seed makes the Jax part of things (like model init) deterministic.
# However, full training still won't be deterministic, for example because the
# tf.data pipeline is not deterministic even if we set the TF seed.
# See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
# These functions do more stuff internally; for the OSS release we mock them
# with trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
write_note("Initializing train dataset...")
train_ds, ntrain_img = input_pipeline.training(config.input)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
total_steps = u.steps("total", config, ntrain_img, batch_size)
def get_steps(name, default=ValueError, cfg=config):
return u.steps(name, cfg, ntrain_img, batch_size, total_steps, default)
u.chrono.inform(total_steps=total_steps, global_bs=batch_size,
steps_per_epoch=ntrain_img / batch_size,
measure=mw.measure, write_note=write_note)
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
# Create student and teacher models
def get_model_mod(name): # Used many times.
mod_name = config[f"{name}_name"]
return importlib.import_module(f"big_vision.models.{mod_name}")
write_note("Initializing models...")
def make_model(name):
return get_model_mod(name).Model(
num_classes=config.num_classes, **config.get(name, {}))
models = {
"student": make_model("student"),
**{t: make_model(t) for t in config.teachers}
}
# We want all parameters to be created in host RAM, not on any device; they
# will be sent to the devices later as needed. Otherwise, we have already run
# into two situations where they were accidentally allocated twice.
def get_init(model, name):
@functools.partial(jax.jit, backend="cpu")
def _init(rng):
bs = batch_size // jax.device_count()
img_size = tuple(getfirst(train_ds.element_spec, name, "image").shape[1:])
no_image = jnp.zeros((bs,) + img_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, no_image))["params"]
# Set bias in the head to a low value, such that loss is small initially.
if "init_head_bias" in config:
params["head"]["bias"] = jnp.full_like(params["head"]["bias"],
config["init_head_bias"])
return params
return _init
rng, *rng_inits = jax.random.split(rng, len(models) + 1)
with u.chrono.log_timing("z/secs/init"):
params_cpu = {
name: get_init(models[name], name=name)(r)
for name, r in zip(models, rng_inits)}
if jax.process_index() == 0:
for name, params in params_cpu.items():
parameter_overview.log_parameter_overview(params, msg=f"{name} params")
mw.measure(f"num_params_{name}",
sum(p.size for p in jax.tree_leaves(params)))
write_note(f"Initializing {config.optax_name} optimizer...")
# For now, we explicitly only optimize the student parameters as there's
# nothing else to be optimized. If we ever want to add learnable projections
# or similar for good (we explored but ditched), need to refactor this a bit.
tx, sched_fns = bv_optax.make(
config, params_cpu["student"], sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu["student"])
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@jax.named_call
def loss_fn(student_params, params, data, rngs):
# Note: need to extract and use `student_params` out of `params` because the
# first argument of `loss_fn` is what's differentiated wrt.
params["student"] = student_params
def fwd(name, params):
return jax.named_call(models[name].apply, name=name)(
{"params": params}, getfirst(data, name, "image"),
train=name == "student", rngs=rngs.get(name)
)[0] # logits, unused_outputs
logits = {name: fwd(name, w) for name, w in params.items()}
measurements = {}
for name, lg in logits.items():
measurements[f"entropy_{name}"] = -jnp.sum(
jax.nn.log_softmax(lg) * jax.nn.softmax(lg), axis=-1)
if "labels" in data:
measurements[f"task_loss_{name}"] = u.softmax_xent(
logits=lg, labels=data["labels"], reduction=False)
# NOTE: xent is linear in labels, so for KL, this is actually the same as
# using a teacher-ensemble in probs-space!
measurements["distill_loss"] = 0.0
for name in config.teachers:
l = dd.dist(logits["student"], logits[name], config.get("distance", "kl"),
**config.get("distance_kw", {}))
measurements[f"distill_loss_{name}"] = l
measurements["distill_loss"] += l
outputs = (measurements["distill_loss"], measurements)
return jax.tree_map(jnp.mean, outputs)
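# Hedged sketch (added; not part of the original file, and not the actual
# `dd.dist` implementation): for the default "kl" distance, a per-example
# forward KL between teacher and student distributions computed from logits
# could look like this.
def _kl_distill_sketch(student_logits, teacher_logits):
  t = jax.nn.softmax(teacher_logits)
  return jnp.sum(t * (jax.nn.log_softmax(teacher_logits)
                      - jax.nn.log_softmax(student_logits)), axis=-1)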
@functools.partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1))
def update_fn(params, opt, rng, data):
"""Update step."""
# Mixup. Note: overwrites the `data` entries (that's intended).
if config.get("mixup") and config.mixup.p:
to_mix = {name: data[name]
for name in ("image", "labels") + tuple(models) if name in data}
rng, _, to_mix = u.mixup(rng, **config.mixup, **to_mix)
data = {**data, **to_mix}
# Get device-specific loss rng.
rng, *rng_models = jax.random.split(rng, len(models) + 1)
rngs_models_local = {
name: {"dropout": jax.random.fold_in(rngi, jax.lax.axis_index("batch"))}
for name, rngi in zip(models, rng_models)
}
w = params["student"] # Need to explicitly pull out the optimized ones.
(l, measurements), grads = jax.lax.pmean(
jax.value_and_grad(loss_fn, has_aux=True)(
w, params, data, rngs=rngs_models_local),
axis_name="batch")
updates, opt = tx.update(grads, opt, w)
w = optax.apply_updates(w, updates)
params["student"] = w
# Take some logging measurements
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(w)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, rng, l, measurements
# We always load the teachers first, because they NEED to be initialized
# and since we don't ever modify them, we don't store them in checkpoints.
for name in config.teachers:
init_def = config[f"{name}_init"]
write_note(f"Initializing {name} from {init_def}…")
params_cpu[name] = get_model_mod(name).load(
params_cpu[name], init_def, config[name],
**config.get(f"{name}_load", {}))
# Decide how to initialize training. The order is important.
# 1. Always resumes from the existing checkpoint, e.g. resumes a finetune job.
# 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
# 3. Initialize student from something, e.g. start a fine-tuning job.
# 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
# NOTE: we never change the teachers, so only checkpoint student here.
checkpoint = {
"params": params_cpu["student"],
"opt": opt_cpu,
"chrono": u.chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu["student"], opt_cpu = checkpoint["params"], checkpoint["opt"]
u.chrono.load(checkpoint["chrono"])
elif config.get("student_init"):
write_note(f"Initialize student from {config.student_init}...")
params_cpu["student"] = get_model_mod("student").load(
params_cpu["student"], config.student_init, config.get("student"),
**config.get("student_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu["student"], msg="restored (student) params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
u.chrono.inform(first_step=first_step)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{u.chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
# Define predict functions that the evaluators can use:
# 1. One per model
predict_fns = {}
for name, model in models.items():
def fwd(params, image, n=name, m=model):
return m.apply({"params": params[n]}, image)
predict_fns[f"{name}_fwd"] = fwd
# 2. One for the ensemble of all teachers.
def teacher_ensemble_fwd(params, image):
all_teacher_logits = [
models[name].apply({"params": params[name]}, image)[0]  # return is `logits, out`
for name in config.teachers
]
return jnp.mean([jax.nn.softmax(l) for l in all_teacher_logits], axis=0), {}
predict_fns["teacher_ensemble_fwd"] = teacher_ensemble_fwd
# 3. One for each (student, teacher) pair, e.g. for distance eval.
for name in [*config.teachers, "teacher_ensemble"]:
def fwd(params, image, n=name): # pylint: disable=function-redefined
student_ret = predict_fns["student_fwd"](params, image)
teacher_ret = predict_fns[f"{n}_fwd"](params, image)
return student_ret, teacher_ret
predict_fns[f"student_{name}_fwd"] = fwd
# Only initialize evaluators when they are first needed.
@functools.lru_cache(maxsize=None)
def evaluators():
return eval_common.from_config(
config, predict_fns,
lambda s: write_note(f"Init evaluator: {s}…\n{u.chrono.note}"),
lambda key, cfg: get_steps(key, default=None, cfg=cfg),
)
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{u.chrono.note}")
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
with u.chrono.log_timing("z/secs/update0", noop=step > first_step + 1):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop, batch)
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or u.chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
u.chrono.tick(step)
if not np.isfinite(l):
raise RuntimeError(f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
u.chrono.pause(wait_for=(params_repl["student"], opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu["student"], opt_cpu = jax.tree_map(
lambda x: np.array(x[0]), (params_repl["student"], opt_repl))
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu["student"],
"opt": opt_cpu,
"chrono": u.chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
u.chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators():
if u.itstime(step, log_steps, total_steps, last=False):
u.chrono.pause(wait_for=params_repl)
u.chrono.tick(step) # Record things like epoch number, core hours etc.
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.chrono.resume()
mw.step_end()
# Run evals after we are done with training. Running them here guarantees that
# evals run if the job is restarted after writing the last checkpoint, and
# also supports eval-only runs (when total_steps or num_epochs is 0).
mw.step_start(total_steps)
for (name, evaluator, _, prefix) in evaluators():
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
write_note(f"Done!\n{u.chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Few common utils used in both/all flexi-trainers."""
import functools
import itertools
import numpy as np
def mkrng(xid, wid, step):
# Need to cap at 0; for example, local runs use -1.
rng_key = (max(xid, 0), max(wid, 0), max(step, 0))
return np.random.default_rng(rng_key)
def mkprob(x):
if x is None:
return x
return np.array(x) / np.sum(x)
def choice(values, ratios, rng=None):
rng = rng or np.random.default_rng()
return rng.choice(values, p=mkprob(ratios))
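# Illustrative check (added; not part of the original file): mkrng is
# deterministic in (xid, wid, step), so every host samples identical flexi
# arguments at each step.
assert (choice([1, 2, 3], [0.2, 0.3, 0.5], mkrng(0, 0, 7))
        == choice([1, 2, 3], [0.2, 0.3, 0.5], mkrng(0, 0, 7)))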
def mkpredictfns(predict_fn, config, template="predict_{x}"):
# If we have two flexi args a=[1,2], b=[10,20], then we create a
# predict_fn for all possible combinations, named "predict_a=1_b=10" etc.
all_combinations = [dict(comb) for comb in itertools.product(
*[[(arg, val) for val in config[arg].v] for arg in config]
)]
return {
template.format(x="_".join(f"{k}={v}" for k, v in kw.items())):
functools.partial(predict_fn, **kw)
for kw in all_combinations}
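# Illustrative check (added; not part of the original file): with flexi args
# a=[1, 2] and b=[10, 20], one partial predict_fn is created per combination.
import types
_demo_cfg = {"a": types.SimpleNamespace(v=[1, 2]),
             "b": types.SimpleNamespace(v=[10, 20])}
_demo_fns = mkpredictfns(lambda **kw: kw, _demo_cfg)
assert sorted(_demo_fns) == ["predict_a=1_b=10", "predict_a=1_b=20",
                             "predict_a=2_b=10", "predict_a=2_b=20"]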
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distill a teacher model into a FlexiViT student.
Note: this file contains code that is generic enough to allow using an
ensemble of teachers. This is inherited from `proj/distill/distill.py`, where
the goal was to make only minimal changes in a fork of that file. However,
this feature does not really make sense for FlexiViT.
"""
# pylint: disable=consider-using-from-import
from functools import partial
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.evaluators.proj.distill.distance as dd
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.trainers.proj.flexi.common as flexi
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
from tensorflow.io import gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def getfirst(d, *keys):
"""Returns the first of `keys` that's present in mapping `d`."""
result, found = None, False
for k in reversed(keys):
if k in d:
result, found = d[k], True
if found:
return result
else:
raise KeyError(f"None of {keys} is in {d.keys()}")
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info(
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
# The pool is used to perform misc operations, such as logging, in an async way.
pool = multiprocessing.pool.ThreadPool()
# Here we register preprocessing ops from modules listed on `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image", "ops_text"]):
importlib.import_module(f"big_vision.pp.{m}")
# This seed makes the Jax part of things (like model init) deterministic.
# However, full training still won't be deterministic, for example because the
# tf.data pipeline is not deterministic even if we set the TF seed.
# See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
# These functions do more stuff internally; for the OSS release we mock them
# with trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
write_note("Initializing train dataset...")
train_ds, ntrain_img = input_pipeline.training(config.input)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
total_steps = u.steps("total", config, ntrain_img, batch_size)
def get_steps(name, default=ValueError, cfg=config):
return u.steps(name, cfg, ntrain_img, batch_size, total_steps, default)
u.chrono.inform(total_steps=total_steps, global_bs=batch_size,
steps_per_epoch=ntrain_img / batch_size,
measure=mw.measure, write_note=write_note)
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
# Create student and teacher models
def get_model_mod(name): # Used many times.
mod_name = config[f"{name}_name"]
return importlib.import_module(f"big_vision.models.{mod_name}")
write_note("Initializing models...")
def make_model(name):
return get_model_mod(name).Model(
num_classes=config.num_classes, **config.get(name, {}))
models = {
"student": make_model("student"),
**{t: make_model(t) for t in config.teachers}
}
# We want all parameters to be created in host RAM, not on any device; they
# will be sent to the devices later as needed. Otherwise, we have already run
# into two situations where they were accidentally allocated twice.
def get_init(model, name):
@partial(jax.jit, backend="cpu")
def _init(rng):
bs = batch_size // jax.device_count()
img_size = tuple(getfirst(train_ds.element_spec, name, "image").shape[1:])
no_image = jnp.zeros((bs,) + img_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, no_image))["params"]
return params
return _init
rng, *rng_inits = jax.random.split(rng, len(models) + 1)
with u.chrono.log_timing("z/secs/init"):
params_cpu = {
name: get_init(models[name], name=name)(r)
for name, r in zip(models, rng_inits)}
if jax.process_index() == 0:
for name, params in params_cpu.items():
parameter_overview.log_parameter_overview(params, msg=f"{name} params")
mw.measure(f"num_params_{name}",
sum(p.size for p in jax.tree_leaves(params)))
write_note(f"Initializing {config.optax_name} optimizer...")
# For now, we explicitly only optimize the student parameters as there's
# nothing else to be optimized. If we ever want to add learnable projections
# or similar for good (we explored but ditched), need to refactor this a bit.
tx, sched_fns = bv_optax.make(
config, params_cpu["student"], sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu["student"])
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@jax.named_call
def loss_fn(student_params, params, data, rngs, **flexi_kw):
# Note: need to extract and use `student_params` out of `params` because the
# first argument of `loss_fn` is what's differentiated wrt.
params["student"] = student_params
def fwd(name, params):
return jax.named_call(models[name].apply, name=name)(
{"params": params}, getfirst(data, name, "image"),
train=name == "student", rngs=rngs.get(name),
**(flexi_kw if name == "student" else {})
)[0] # logits, unused_outputs
logits = {name: fwd(name, w) for name, w in params.items()}
measurements = {}
for name, lg in logits.items():
measurements[f"entropy_{name}"] = -jnp.sum(
jax.nn.log_softmax(lg) * jax.nn.softmax(lg), axis=-1)
if "labels" in data:
measurements[f"task_loss_{name}"] = u.softmax_xent(
logits=lg, labels=data["labels"], reduction=False)
# NOTE: xent is linear in labels, so for KL, this is actually the same as
# using a teacher-ensemble in probs-space!
measurements["distill_loss"] = 0.0
for name in config.teachers:
l = dd.dist(logits["student"], logits[name], config.get("distance", "kl"),
**config.get("distance_kw", {}))
measurements[f"distill_loss_{name}"] = l
measurements["distill_loss"] += l
outputs = (measurements["distill_loss"], measurements)
return jax.tree_map(jnp.mean, outputs)
flexi_argnames = sorted(config.flexi)
@partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1),
static_broadcasted_argnums=tuple(range(4, 4 + len(flexi_argnames))))
def update_fn(params, opt, rng, data, *args):
"""Update step."""
# Mixup. Note: overwrites the `data` entries (that's intended).
if config.get("mixup") and config.mixup.p:
to_mix = {name: data[name]
for name in ("image", "labels") + tuple(models) if name in data}
rng, _, to_mix = u.mixup(rng, **config.mixup, **to_mix)
data = {**data, **to_mix}
# Get device-specific loss rng.
rng, *rng_models = jax.random.split(rng, len(models) + 1)
rngs_models_local = {
name: {"dropout": jax.random.fold_in(rngi, jax.lax.axis_index("batch"))}
for name, rngi in zip(models, rng_models)
}
w = params["student"] # Need to explicitly pull out the optimized ones.
(l, measurements), grads = jax.lax.pmean(
jax.value_and_grad(loss_fn, has_aux=True)(
w, params, data, rngs=rngs_models_local,
**dict(zip(flexi_argnames, args))),
axis_name="batch")
updates, opt = tx.update(grads, opt, w)
w = optax.apply_updates(w, updates)
params["student"] = w
# Take some logging measurements
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(w)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, rng, l, measurements
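# Note (added for clarity): the flexi values are passed positionally after the
# four array arguments and declared static via static_broadcasted_argnums, so
# each distinct combination (e.g. of patch sizes) triggers its own pmap
# compilation of update_fn above; inside the step they are rebound by name
# through dict(zip(flexi_argnames, args)).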
# We always load the teachers first, because they NEED to be initialized
# and since we don't ever modify them, we don't store them in checkpoints.
for name in config.teachers:
init_def = config[f"{name}_init"]
write_note(f"Initializing {name} from {init_def}…")
params_cpu[name] = get_model_mod(name).load(
params_cpu[name], init_def, config[name],
**config.get(f"{name}_load", {}))
# Decide how to initialize training. The order is important.
# 1. Always resumes from the existing checkpoint, e.g. resumes a finetune job.
# 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
# 3. Initialize student from something, e.g. start a fine-tuning job.
# 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
# NOTE: we never change the teachers, so only checkpoint student here.
checkpoint = {
"params": params_cpu["student"],
"opt": opt_cpu,
"chrono": u.chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu["student"], opt_cpu = checkpoint["params"], checkpoint["opt"]
u.chrono.load(checkpoint["chrono"])
elif config.get("student_init"):
write_note(f"Initialize student from {config.student_init}...")
params_cpu["student"] = get_model_mod("student").load(
params_cpu["student"], config.student_init, config.get("student"),
**config.get("student_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu["student"], msg="restored (student) params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
u.chrono.inform(first_step=first_step)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{u.chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
# Initialize evaluators lazily, when they are first needed, so that issues
# with training surface faster.
evaluators = None
# Define predict functions that the evaluators can use:
def predict_fn(params, *, name, **kw):
image = kw.pop(name, kw.pop("image", None))
# Ugly API compatibility necessity:
for k in ("student", *config.teachers):
kw.pop(k, 0)
return models[name].apply({"params": params[name]}, image, **kw)
# 1. One for each variant of the student
student_pfns = flexi.mkpredictfns(partial(predict_fn, name="student"),
config.flexi, "student_{x}")
# 2. One per teacher model
teacher_pfns = {
name: partial(predict_fn, name=name)
for name in config.teachers
}
# 3. One for each (student-variant, teacher) pair, e.g. for distance eval.
combined_pfns = {
f"{sn}_{tn}": lambda *a, sfn=sfn, tfn=tfn, **kw: (sfn(*a, **kw), tfn(*a, **kw)) # pylint: disable=line-too-long
for sn, sfn in student_pfns.items()
for tn, tfn in teacher_pfns.items()
}
predict_fns = {**student_pfns, **teacher_pfns, **combined_pfns}
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{u.chrono.note}")
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
np_rng = flexi.mkrng(xid, wid, step)
flexi_args = [
flexi.choice(config.flexi[n].v, config.flexi[n].p, np_rng)
for n in flexi_argnames
]
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
with u.chrono.log_timing("z/secs/update0", noop=step > first_step + 1):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop, batch, *flexi_args)
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or u.chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
u.chrono.tick(step)
if not np.isfinite(l):
raise RuntimeError(f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
u.chrono.pause(wait_for=(params_repl["student"], opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu["student"], opt_cpu = jax.tree_map(
lambda x: np.array(x[0]), (params_repl["student"], opt_repl))
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu["student"],
"opt": opt_cpu,
"chrono": u.chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
u.chrono.resume()
if evaluators is None:
evaluators = eval_common.from_config(
config, predict_fns,
lambda s: write_note(f"Init evaluator: {s}…\n{u.chrono.note}"),
lambda key, cfg: get_steps(key, default=None, cfg=cfg),
)
for (name, evaluator, log_steps, prefix) in evaluators:
if u.itstime(step, log_steps, total_steps):
u.chrono.pause(wait_for=params_repl)
u.chrono.tick(step) # Record things like epoch number, core hours etc.
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.chrono.resume()
mw.step_end()
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
write_note(f"Done!\n{u.chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop with flexible/schedulable settings."""
# pylint: disable=consider-using-from-import
from functools import partial
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.trainers.proj.flexi.common as flexi
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
from tensorflow.io import gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info(
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
# The pool is used to perform misc operations, such as logging, in an async way.
pool = multiprocessing.pool.ThreadPool()
# Here we register preprocessing ops from modules listed on `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image", "ops_text"]):
importlib.import_module(f"big_vision.pp.{m}")
# This seed makes the Jax part of things (like model init) deterministic.
# However, full training still won't be deterministic, for example because the
# tf.data pipeline is not deterministic even if we set the TF seed.
# See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
# These functions do more stuff internally; for the OSS release we mock them
# with trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
write_note("Initializing train dataset...")
train_ds, ntrain_img = input_pipeline.training(config.input)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
total_steps = u.steps("total", config, ntrain_img, batch_size)
def get_steps(name, default=ValueError, cfg=config):
return u.steps(name, cfg, ntrain_img, batch_size, total_steps, default)
u.chrono.inform(total_steps=total_steps, global_bs=batch_size,
steps_per_epoch=ntrain_img / batch_size,
measure=mw.measure, write_note=write_note)
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
write_note(f"Initializing {config.model_name} model...")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = model_mod.Model(
num_classes=config.num_classes, **config.get("model", {}))
# We want all parameters to be created in host RAM, not on any device; they
# will be sent to the devices later as needed. Otherwise, we have already run
# into two situations where they were accidentally allocated twice.
@partial(jax.jit, backend="cpu")
def init(rng):
shape = tuple(train_ds.element_spec["image"].shape[1:])
bs = batch_size // jax.device_count()
dummy_input = jnp.zeros((bs,) + shape, jnp.float32)
params = flax.core.unfreeze(model.init(rng, dummy_input))["params"]
# Set bias in the head to a low value, such that loss is small initially.
if "init_head_bias" in config:
params["head"]["bias"] = jnp.full_like(params["head"]["bias"],
config["init_head_bias"])
return params
rng, rng_init = jax.random.split(rng)
with u.chrono.log_timing("z/secs/init"):
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
flexi_argnames = sorted(config.flexi)
@partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1),
static_broadcasted_argnums=tuple(range(5, 5 + len(flexi_argnames))))
def update_fn(params, opt, rng, images, labels, *args):
"""Update step."""
measurements = {}
if config.get("mixup") and config.mixup.p:
rng, (images, labels), _ = u.mixup(rng, images, labels, **config.mixup)
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index("batch"))
def loss_fn(params, images, labels):
logits, _ = model.apply(
{"params": params}, images,
train=True, rngs={"dropout": rng_model_local},
**dict(zip(flexi_argnames, args)))
return getattr(u, config.get("loss", "sigmoid_xent"))(
logits=logits, labels=labels)
l, grads = jax.value_and_grad(loss_fn)(params, images, labels)
l, grads = jax.lax.pmean((l, grads), axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, rng, l, measurements
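  # Aside: the three l2_* measurements above all follow one pattern, sketched
  # here for clarity (illustrative only): the global L2 norm of a pytree is
  # the square root of the summed squared norms of its leaves.
  def _demo_global_l2_norm(tree):
    return jnp.sqrt(sum(jnp.vdot(x, x) for x in jax.tree_leaves(tree)))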
  # We do not jit/pmap this function, because it is passed to an evaluator
  # that does it later. We output as many intermediate tensors as possible for
  # maximal flexibility. Later `jit` will prune out things that are not needed.
def predict_fn(params, image, **flexi_kw):
logits, out = model.apply({"params": params}, image, **flexi_kw)
return logits, out
  # Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
  # 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"opt": opt_cpu,
"chrono": u.chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu, opt_cpu = checkpoint["params"], checkpoint["opt"]
u.chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.get("model"),
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
u.chrono.inform(first_step=first_step)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{u.chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
  # Initialize evaluators only when they are first needed, so that issues
  # with training surface faster.
evaluators = None
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{u.chrono.note}")
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
    np_rng = flexi.mkrng(xid, wid, step)
flexi_args = [
flexi.choice(config.flexi[n].v, config.flexi[n].p, np_rng)
for n in flexi_argnames
]
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
with u.chrono.log_timing("z/secs/update0", noop=step > first_step + 1):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop, batch["image"], batch["labels"],
*flexi_args)
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or u.chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
u.chrono.tick(step)
if not np.isfinite(l):
raise RuntimeError(f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
u.chrono.pause(wait_for=(params_repl, opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu = jax.tree_map(lambda x: np.array(x[0]), params_repl)
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu, "opt": opt_cpu, "chrono": u.chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
u.chrono.resume()
if evaluators is None:
evaluators = eval_common.from_config(
config, flexi.mkpredictfns(predict_fn, config.flexi, "predict_{x}"),
lambda s: write_note(f"Init evaluator: {s}…\n{u.chrono.note}"),
lambda key, cfg: get_steps(key, default=None, cfg=cfg),
)
for (name, evaluator, log_steps, prefix) in evaluators:
if u.itstime(step, log_steps, total_steps):
u.chrono.pause(wait_for=params_repl)
u.chrono.tick(step) # Record things like epoch number, core hours etc.
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.chrono.resume()
mw.step_end()
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
write_note(f"Done!\n{u.chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inputs, outputs and losses for depth prediction task."""
import big_vision.utils as u
import einops
import jax
import jax.numpy as jnp
import numpy as np
ONE_HOT_AXIS = -2
def input_pp(batch, config):
"""Makes inputs for depth prediction task."""
if "labels" not in batch:
x = None
else:
hp, wp = config.model.patch_size
depth = batch["labels"][..., 0]
# Discretize to [0, ..., bins - 1].
nbins = config.model.inputs.depth[ONE_HOT_AXIS]
mind = config.min_depth
maxd = config.max_depth
depth = (depth - mind) / (maxd - mind)
depth *= nbins
depth = jnp.floor(depth).astype(jnp.int32)
depth = jnp.minimum(depth, nbins - 1)
depth = jnp.maximum(depth, 0)
# Converts labels from (B, H, W, c) to (B, num_patches, c, patch_size).
depth = jax.nn.one_hot(
einops.rearrange(
depth, "b (hn hp) (wn wp) -> b (hn wn) (hp wp)", hp=hp, wp=wp),
num_classes=config.model.inputs.depth[ONE_HOT_AXIS],
axis=ONE_HOT_AXIS)
x = {"depth": depth}
ctx = batch.get("image_ctx", batch.get("image", None))
return {"ctx": ctx, "x": x}
def loss_fn(predictions, batch, config):
"""Computes loss for depth prediction task."""
labels = input_pp(batch, config)["x"]
losses = {}
loss = u.softmax_xent(
logits=predictions["depth"], labels=labels["depth"], reduction=False,
axis=ONE_HOT_AXIS)
# Do not train on the closest class; usually regions of the image with
# depth==0, which is the default for regions with no depth signal.
# TODO: Encode depth==0 as class==-1.
mask = jnp.argmax(labels["depth"], ONE_HOT_AXIS) != 0
loss = loss * mask
losses["loss_depth"] = loss
return sum(losses.values()), losses
def predict_outputs(predictions, config):
"""Makes outputs for depth predictin tasks."""
# Maps predictions to (height, width, channels).
hp, wp = config.model.patch_size
hn, wn = np.array(config.model.input_size) // np.array((hp, wp))
depth = einops.rearrange(
predictions["depth"],
"b (hn wn) c (hp wp) -> b (hn hp) (wn wp) c",
hn=hn, wn=wn, hp=hp, wp=wp)
depth = jnp.argmax(depth, axis=-1) # [B, H, W]
# Revert discretization.
nbins = config.model.inputs.depth[ONE_HOT_AXIS]
mind = config.min_depth
maxd = config.max_depth
depth = depth.astype(jnp.float32) + 0.5 # Undoes floor in expectation.
depth /= nbins
depth = depth * (maxd - mind) + mind
return {"depth": depth}
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train loop for training the stage-II model."""
# pylint: disable=consider-using-from-import
import functools
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
from big_vision import input_pipeline
import big_vision.datasets.core as ds_core
import big_vision.evaluators.common as eval_common
import big_vision.models.proj.uvim.decode as decode
import big_vision.optax as bv_optax
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow.io.gfile as gfile
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
FLAGS = flags.FLAGS
ONE_HOT_AXIS = -2
partial = functools.partial
def get_model(config):
mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = mod.Model(**config.model)
return model, mod
def setup_task(config):
"""Get functions and params to encode and decode labels as token sequences."""
config = config.oracle
# Define task input and predict functions.
task_module = importlib.import_module(f"big_vision.trainers.{config.task}")
input_fn = partial(task_module.input_pp, config=config)
predict_outputs_fn = partial(task_module.predict_outputs, config=config)
oracle, mod = get_model(config)
if config.get("model_init", None):
params, state = mod.load(None, config.model_init)
params = {"params": params, "state": state}
else:
params = {}
def encode_labels(params, batch):
inputs = input_fn(batch)
code = oracle.apply(params, **inputs, method=oracle.encode)[1]["code"]
return code + 1 # To avoid padding symbol.
def decode_labels(params, code, batch, **kwargs):
code = code - 1
inputs = input_fn(batch)
inputs["x"] = code
logits, _ = oracle.apply(
params, **inputs, discrete_input=True, **kwargs, method=oracle.decode)
return logits
return encode_labels, decode_labels, predict_outputs_fn, params
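# Aside (illustrative, not part of the trainer): encode_labels/decode_labels
# above shift the oracle codes by +1/-1 so that token 0 stays reserved as the
# padding symbol of the stage-II sequence model. A minimal round trip:
def _demo_code_shift(code):
  shifted = code + 1  # What encode_labels emits; never collides with pad=0.
  return shifted - 1  # What the oracle sees again inside decode_labels.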
def main(argv):
del argv
config = FLAGS.config
workdir = FLAGS.workdir
logging.info("\u001b[33mHello from process %i holding %i/%i devices and "
"writing to workdir %s.\u001b[0m", jax.process_index(),
jax.local_device_count(), jax.device_count(), workdir)
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
  # The pool is used to perform misc operations, such as logging, in an
  # async way.
pool = multiprocessing.pool.ThreadPool()
  # Here we register preprocessing ops from modules listed in `pp_modules`.
for m in config.get("pp_modules",
["ops_general", "ops_image", "proj.uvim.pp_ops"]):
importlib.import_module(f"big_vision.pp.{m}")
  # This seed makes the Jax part of things (like model init) deterministic.
  # However, full training still won't be deterministic, for example because
  # the tf.data pipeline is not deterministic even if we set the TF seed.
  # See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
  # These functions do more stuff internally; for the OSS release we mock
  # them with trivial alternatives to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
chrono = u.Chrono()
write_note("Initializing train dataset...")
train_data = ds_core.get(**config.input.data)
train_ds = input_pipeline.make_for_train(
data=train_data.get_tfdata(ordered=False),
batch_size=batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(config.input.get("pp")),
shuffle_buffer_size=config.input.get("shuffle_buffer_size"),
cache_raw=config.input.get("cache_raw", False),
filter_fn=config.input.get("filter_fn"),
)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
ntrain_img = train_data.total_examples
def get_steps(name, default=ValueError): # partial doesn't work well here.
return u.steps(name, config, ntrain_img, batch_size, default)
total_steps = get_steps("total")
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
write_note(f"Initializing {config.model_name} model...")
model, model_mod = get_model(config)
encode_labels, decode_labels, predict_outputs_fn, task_params = (
setup_task(config))
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent there later as needed. Otherwise we risk allocating them
  # twice, which we have already run into in two situations.
@partial(jax.jit, backend="cpu")
def init(rng):
batch = jax.tree_map(
lambda x: jnp.zeros(x.shape, x.dtype.as_numpy_dtype),
train_ds.element_spec)
images = batch["image"]
labels = encode_labels(task_params, batch)
variables = model.init(rng, images, labels)
params = flax.core.unfreeze(variables["params"])
return params
rng, init_rng = jax.random.split(rng)
params_cpu = init(init_rng)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1))
def update_fn(params, opt, batch, update_rng, task_params):
"""Update step."""
images = batch["image"]
labels = encode_labels(task_params, batch)
measurements = {}
rng, new_rng = jax.random.split(update_rng)
    # Bind the rng key to the device id (which is unique across hosts).
rng_local = jax.random.fold_in(rng, jax.lax.axis_index("batch"))
def loss_fn(params, images, labels):
logits = model.apply({"params": params}, images, labels, train=True,
rngs={"dropout": rng_local})
loss = u.weighted_softmax_xent(
logits=logits, labels=labels,
reduction=True, normalize=True)
return loss
l, grads = jax.value_and_grad(loss_fn)(params, images, labels)
l, grads = jax.lax.pmean((l, grads), axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, l, new_rng, measurements
# Define evaluators.
def validation_fn(params, batch):
"""Compute per-example metrics."""
params, task_params = params["params"], params["task_params"]
images = batch["image"]
labels = encode_labels(task_params, batch)
logits = model.apply({"params": params}, images, labels, train=False)
loss = u.weighted_softmax_xent(
logits=logits, labels=labels,
reduction=False, normalize=True)
losses = {"loss": loss}
return jax.tree_map(
lambda x: jnp.mean(x, axis=tuple(range(1, x.ndim))),
losses)
def predict_fn(params, batch, seed=0, temperature=1e-7, **extra):
params, task_params = params["params"], params["task_params"]
# Derive a rng key from the inputs so that all batches use different keys.
if "image/id" in batch:
key = batch["image/id"]
else:
key = batch["image"].sum(axis=[1, 2, 3]).astype(jnp.int32)
local_rng = jax.lax.scan(
lambda k, x: (jax.random.fold_in(k, x), None),
jax.random.PRNGKey(seed),
key,
)[0]
images = batch["image"]
batch_size = images.shape[0]
prompts = jnp.zeros((batch_size, config.model.seq_len), dtype=jnp.int32)
seqs, _, _ = decode.temperature_sampling(
params={"params": params}, model=model, seed=local_rng,
inputs=images, prompts=prompts,
num_samples=1, eos_token=-1, prefill=False,
temperature=temperature)
seqs = jnp.squeeze(seqs, 1)
logits = decode_labels(task_params, seqs, batch)
return predict_outputs_fn(logits, **extra)
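  # Aside: the scan in predict_fn above folds every example id into a single
  # key; an equivalent (illustrative, non-jitted) loop form:
  def _demo_key_from_ids(ids, seed=0):
    key = jax.random.PRNGKey(seed)
    for i in np.asarray(ids):
      key = jax.random.fold_in(key, int(i))
    return key  # Same batch of ids -> same key, on every host.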
# Only initialize evaluators when they are first needed.
@functools.lru_cache(maxsize=None)
def evaluators():
return eval_common.from_config(
config, {"predict": predict_fn, "validation": validation_fn},
lambda s: write_note(f"Initializing evaluator: {s}...\n{chrono.note}")
)
  # Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
  # 4. Initialize part of the model, e.g. only the encoder or decoder.
  # 5. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"opt": opt_cpu,
"chrono": chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu, opt_cpu = checkpoint["params"], checkpoint["opt"]
chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.model,
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
chrono.inform(first_step, total_steps, batch_size, ntrain_img / batch_size)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
task_params = flax.jax_utils.replicate(task_params)
update_rngs = flax.jax_utils.replicate(rng)
ckpt_writer = None
write_note(f"First step compilations...\n{chrono.note}")
error = None # For exiting with an error after cleanup. Avoids indentation.
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
params_repl, opt_repl, loss_value, update_rngs, measurements = (
update_fn(
params_repl,
opt_repl,
batch,
update_rng=update_rngs,
task_params=task_params))
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
chrono.tick(step, mw.measure, write_note)
if not np.isfinite(l):
error = (f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
break
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
chrono.pause(wait_for=(params_repl, opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see (internal link)). Also, takes device 0's params only.
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
params_cpu = jax.tree_map(lambda x: np.array(x[0]), params_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu, "opt": opt_cpu, "chrono": chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators():
if u.itstime(step, log_steps, total_steps, first=log_steps < total_steps,
last=False):
chrono.pause(wait_for=(params_repl, task_params))
write_note(f"{name} evaluation...\n{chrono.note}")
for key, value in evaluator.run(
{"params": params_repl, "task_params": task_params}):
mw.measure(f"{prefix}{key}", value)
chrono.resume()
mw.step_end()
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
  # Run the final evaluation, also used for eval-only jobs (total_steps == 0).
for (name, evaluator, _, prefix) in evaluators():
write_note(f"{name} evaluation...\n{chrono.note}")
for key, value in evaluator.run(
{"params": params_repl, "task_params": task_params}):
mw.measure(f"{prefix}{key}", value)
# Last note needs to happen before the pool's closed =)
if not error:
write_note(f"Done!\n{chrono.note}")
else:
write_note(f"Failed!\n{error}\n{chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
# Before cleanup, as cleanup should only run for successful jobs.
if error is not None:
raise RuntimeError(error)
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train loop for training the stage-I model."""
# pylint: disable=consider-using-from-import
import functools
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
from big_vision import input_pipeline
import big_vision.datasets.core as ds_core
import big_vision.evaluators.common as eval_common
import big_vision.optax as bv_optax
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow.io.gfile as gfile
SG = jax.lax.stop_gradient
partial = functools.partial
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info("Workdir: %s", workdir)
logging.info("\u001b[33mHello from process %i holding %i/%i devices and "
"writing to workdir %s.\u001b[0m", jax.process_index(),
jax.local_device_count(), jax.device_count(), workdir)
# Define task input, loss and predict functions.
task_module = importlib.import_module(f"big_vision.trainers.{config.task}")
input_pp_fn = partial(task_module.input_pp, config=config)
task_loss_fn = partial(task_module.loss_fn, config=config)
predict_outputs_fn = partial(task_module.predict_outputs, config=config)
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
  # The pool is used to perform misc operations, such as logging, in an
  # async way.
pool = multiprocessing.pool.ThreadPool()
  # Here we register preprocessing ops from modules listed in `pp_modules`.
for m in config.get("pp_modules",
["ops_general", "ops_image", "proj.uvim.pp_ops"]):
importlib.import_module(f"big_vision.pp.{m}")
  # This seed makes the Jax part of things (like model init) deterministic.
  # However, full training still won't be deterministic, for example because
  # the tf.data pipeline is not deterministic even if we set the TF seed.
  # See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
  # These functions do more stuff internally; for the OSS release we mock
  # them with trivial alternatives to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
chrono = u.Chrono()
write_note("Initializing train dataset...")
train_data = ds_core.get(**config.input.data)
train_ds = input_pipeline.make_for_train(
data=train_data.get_tfdata(ordered=False),
batch_size=batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(config.input.get("pp")),
shuffle_buffer_size=config.input.get("shuffle_buffer_size"),
cache_raw=config.input.get("cache_raw", False),
filter_fn=config.input.get("filter_fn"),
)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
ntrain_img = train_data.total_examples
def get_steps(name, default=ValueError): # partial doesn't work well here.
return u.steps(name, config, ntrain_img, batch_size, default)
total_steps = get_steps("total")
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
write_note(f"Initializing {config.model_name} model...")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = model_mod.Model(**config.model)
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent there later as needed. Otherwise we risk allocating them
  # twice, which we have already run into in two situations.
@partial(jax.jit, backend="cpu")
def init(rng):
batch = jax.tree_map(
lambda x: jnp.zeros(x.shape, x.dtype.as_numpy_dtype),
train_ds.element_spec)
init_res = flax.core.unfreeze(model.init(rng, **input_pp_fn(batch)))
params, state = init_res["params"], init_res["state"]
# Set bias in the heads to a low value, such that loss is small initially.
for key in config.model.outputs:
params[f"head_{key}"]["bias"] = jnp.full_like(
params[f"head_{key}"]["bias"], config.get("init_head_bias", 0))
return params, state
rng, rng_init = jax.random.split(rng)
rng_init_params, rng_init_state = jax.random.split(rng_init)
params_cpu, state_cpu = init({"params": rng_init_params,
"state": rng_init_state})
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1, 2),
static_broadcasted_argnums=(5,))
def update_fn(params, opt, state, batch, rng, update_dict=True):
"""Update step."""
measurements = {}
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index("batch"))
def loss_fn(params, state, batch):
(logits, out), mutated_col = model.apply(
{"params": params, "state": state},
**input_pp_fn(batch),
train=True, update_dict=update_dict,
rngs={"dropout": rng_model_local, "vqvae": rng_model},
mutable=["state"])
btlneck = out["bottleneck"]
btlneck_q = out["bottleneck_q"]
loss_rec, logs = jax.tree_map(jnp.mean, task_loss_fn(logits, batch))
loss_commitment = jnp.mean(jnp.square(btlneck - SG(btlneck_q)))
loss = loss_rec + config.get("w_commitment", 0.25) * loss_commitment
aux = {
"loss_rec": jax.lax.pmean(loss_rec, axis_name="batch"),
"loss_commitment": jax.lax.pmean(loss_commitment, axis_name="batch"),
"codebook_zeros_ratio": out["codebook_zeros_ratio"],
"codebook_max_ratio": out["codebook_max_ratio"],
"state": mutated_col["state"],
**jax.tree_map(partial(jax.lax.pmean, axis_name="batch"), logs),
}
return loss, aux
(l, aux), grads = jax.value_and_grad(loss_fn, has_aux=True)(
params, state, batch)
l, grads = jax.lax.pmean((l, grads), axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
state = aux.pop("state")
measurements = {**measurements, **aux}
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, state, l, rng, measurements
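  # Aside: the commitment term in loss_fn above is the standard VQ-VAE trick;
  # the stop_gradient (SG) means the penalty pulls the encoder output toward
  # its codebook entry without moving the codebook itself. Minimal sketch
  # (illustrative only):
  def _demo_commitment_loss(btlneck, btlneck_q, beta=0.25):
    return beta * jnp.mean(jnp.square(btlneck - SG(btlneck_q)))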
# Define evaluators.
def validation_fn(params, batch):
"""Compute per-example metrics."""
logits, out = model.apply(params, **input_pp_fn(batch))
_, losses = task_loss_fn(logits, batch)
btlneck = out["bottleneck"]
btlneck_q = out["bottleneck_q"]
losses["loss_commitment"] = jnp.square(btlneck - btlneck_q)
return jax.tree_map(
lambda x: jnp.mean(x, axis=tuple(range(1, x.ndim))),
losses)
def predict_fn(params, batch):
logits, _ = model.apply(params, **input_pp_fn(batch))
outputs = predict_outputs_fn(logits)
return outputs
# Only initialize evaluators when they are first needed.
@functools.lru_cache(maxsize=None)
def evaluators():
return eval_common.from_config(
config, {"predict": predict_fn, "validation": validation_fn},
lambda s: write_note(f"Initializing evaluator: {s}...\n{chrono.note}")
)
  # Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
  # 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"state": state_cpu,
"opt": opt_cpu,
"chrono": chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu = checkpoint["params"]
state_cpu = checkpoint["state"]
opt_cpu = checkpoint["opt"]
chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu, state_cpu = model_mod.load(
{"params": params_cpu, "state": state_cpu},
config.model_init, config.model,
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
chrono.inform(first_step, total_steps, batch_size, ntrain_img / batch_size)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
state_repl = flax.jax_utils.replicate(state_cpu)
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{chrono.note}")
error = None # For exiting with an error after cleanup. Avoids indentation.
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
params_repl, opt_repl, state_repl, loss_value, rngs_loop, measurements = (
update_fn(
params_repl,
opt_repl,
state_repl,
batch,
rngs_loop,
not config.get("freeze_dict", True)))
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
chrono.tick(step, mw.measure, write_note)
if not np.isfinite(l):
error = (f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
break
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
chrono.pause(wait_for=(params_repl, opt_repl, state_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu, opt_cpu, state_cpu = jax.tree_map(
lambda x: np.array(x[0]), (params_repl, opt_repl, state_repl))
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {
"params": params_cpu,
"state": state_cpu,
"opt": opt_cpu,
"chrono": chrono.save(),
}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators():
if u.itstime(step, log_steps, total_steps):
chrono.pause(wait_for=(params_repl, state_repl))
write_note(f"{name} evaluation...\n{chrono.note}")
for key, value in evaluator.run(
{"params": params_repl, "state": state_repl}):
mw.measure(f"{prefix}{key}", value)
chrono.resume()
mw.step_end()
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
  # Support eval-only runs: run evaluation if total_steps (or num_epochs) is 0.
if total_steps == 0:
for (name, evaluator, _, prefix) in evaluators():
write_note(f"{name} evaluation...\n{chrono.note}")
for key, value in evaluator.run(
{"params": params_repl, "state": state_repl}):
mw.measure(f"{prefix}{key}", value)
# Last note needs to happen before the pool's closed =)
if not error:
write_note(f"Done!\n{chrono.note}")
else:
write_note(f"Failed!\n{error}\n{chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
# Before cleanup, as cleanup should only run for successful jobs.
if error is not None:
raise RuntimeError(error)
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inputs, outputs and losses for panoptic task."""
import big_vision.utils as u
import einops
import jax
import jax.numpy as jnp
import numpy as np
ONE_HOT_AXIS = -2
def input_pp(batch, config):
"""Make inputs for panoptic segmentation task."""
if "labels" not in batch:
    # During prediction in phase 2 there is no 'labels' field.
x = None
else:
hp, wp = config.model.patch_size
x = {
"semantics": batch["labels"][..., 0],
"instances": batch["labels"][..., 1],
}
# Convert labels from (B, H, W) to (B, num_patches, num_classes, patch_size)
for key in ["semantics", "instances"]:
x[key] = jax.nn.one_hot(
einops.rearrange(
x[key], "b (hn hp) (wn wp) -> b (hn wn) (hp wp)", hp=hp, wp=wp),
num_classes=config.model.inputs[key][ONE_HOT_AXIS], axis=ONE_HOT_AXIS)
ctx = batch.get("image_ctx", batch.get("image", None))
return {"ctx": ctx, "x": x}
def loss_fn(logits, batch, config):
"""Compute loss for panoptic task."""
labels = input_pp(batch, config)["x"]
losses = {}
for key in ["semantics", "instances"]:
losses[f"loss_{key}"] = u.softmax_xent(
logits=logits[key], labels=labels[key], reduction=False,
axis=ONE_HOT_AXIS)
return sum(losses.values()), losses
def predict_outputs(logits, config, min_fraction=0.0):
"""Make outputs for panoptic segmentation task."""
# Map logits to (height, width, channels).
hp, wp = config.model.patch_size
hn, wn = np.array(config.model.input_size) // np.array((hp, wp))
outputs = {}
for key in ["semantics", "instances"]:
assert ONE_HOT_AXIS == -2, "Rearrange below depends on this."
outputs[key] = einops.rearrange(
logits[key],
"b (hn wn) c (hp wp) -> b (hn hp) (wn wp) c",
hn=hn, wn=wn, hp=hp, wp=wp)
return panoptic_predictions_from_logits(
**outputs, min_fraction=min_fraction)
def panoptic_predictions_from_logits(semantics, instances, min_fraction=0.0):
"""Make panoptic prediction from logits."""
ins = jnp.argmax(instances, axis=-1)
  # Note: make sure each instance has all of its pixels annotated with the
  # same label; otherwise it gets split into several instances, which greatly
  # affects the number of unmatched predicted segments (FP) and hence RQ.
masks = jax.nn.one_hot(ins, instances.shape[-1], dtype=jnp.int32)
label = jnp.argmax(jnp.einsum("bhwk,bhwn->bnk", semantics, masks), axis=-1)
sem = jnp.einsum("bhwn,bn->bhw", masks, label)
out = jnp.stack([sem, ins], axis=-1)
# Filter out small objects
fraction = jnp.sum(masks, axis=(1, 2), keepdims=True)/np.prod(ins.shape[1:3])
mask_big = (fraction > min_fraction).astype("int32")
mask_big_spatial = jnp.sum(masks * mask_big, axis=-1, keepdims=True) > 0
return out * mask_big_spatial.astype("int32")
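# Aside (illustrative): the two einsums above implement "one semantic label
# per instance". "bhwk,bhwn->bnk" sums semantic logit mass over the pixels of
# each instance mask, the argmax picks each mask's majority class, and
# "bhwn,bn->bhw" paints that class back onto the mask's pixels.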
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to inspect coco data and predictions in notebooks."""
# pylint: disable=consider-using-from-import
import functools
import json
import os
import numpy as np
from panopticapi import utils as pycoco_utils
from skimage import segmentation
import tensorflow.io.gfile as gfile
ROOT = os.environ.get('COCO_DATA_DIR', '.')
PANOPTIC_COCO_CATS_FILE = f'{ROOT}/panoptic_coco_categories.json'
@functools.lru_cache(maxsize=None)
def _coco_panoptic_categories():
with gfile.GFile(PANOPTIC_COCO_CATS_FILE, 'r') as f:
categories_list = json.load(f)
return tuple(categories_list)
def rgb_panoptic_from_twochannels(twochannels, boundaries: bool = False):
"""Makes a RGB panoptic output and segments_info from a twochannels view."""
semantics = twochannels[..., 0]
instances = twochannels[..., 1]
max_instances = np.max(instances) + 1
merged = semantics * max_instances + instances
merged = np.where(semantics < 0, semantics, merged)
categories_list = _coco_panoptic_categories()
categories = {category['id']: category for category in categories_list}
id_generator = pycoco_utils.IdGenerator(categories)
segments_info = {}
rgb = np.zeros((*instances.shape[:2], 3), dtype=np.uint8)
for merged_id in np.unique(merged):
if merged_id // max_instances > 0:
category = categories_list[int(merged_id // max_instances) - 1]
segment_id, color = id_generator.get_id_and_color(category['id'])
else:
category = {'id': -1, 'name': 'void', 'isthing': False}
segment_id, color = -1, np.array([0, 0, 0])
segments_info[segment_id] = {
'id': segment_id,
'color': color,
'category_id': category['id'],
'name': category['name'],
'isthing': category['isthing'],
}
rgb[merged == merged_id] = color
if boundaries:
boundaries = segmentation.find_boundaries(
pycoco_utils.rgb2id(rgb), mode='thick')
rgb[boundaries] = 0
return rgb, segments_info
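# Aside (illustrative): the merge above packs a (semantic, instance) pair into
# one id via sem * max_instances + ins, uniquely decodable as long as
# ins < max_instances; void (negative) semantics bypass the packing via the
# np.where. A minimal round trip:
def _demo_pack_unpack(sem=7, ins=2, max_instances=4):
  merged = sem * max_instances + ins  # -> 30
  return merged // max_instances, merged % max_instances  # -> (7, 2)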
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inputs, outputs and losses for colorization task."""
import einops
import jax.numpy as jnp
import numpy as np
ONE_HOT_AXIS = -2
def input_pp(batch, config):
"""Make inputs for colorization task."""
if "labels" not in batch:
    # During prediction in phase 2 there is no 'labels' field.
x = None
else:
hp, wp = config.model.patch_size
x = {
"color": batch["labels"],
}
    # Convert labels from (B, H, W, C) to (B, num_patches, C, patch_size).
x["color"] = einops.rearrange(
x["color"], "b (hn hp) (wn wp) c -> b (hn wn) c (hp wp)", hp=hp, wp=wp)
ctx = batch.get("image_ctx", batch.get("image", None))
return {"ctx": ctx, "x": x}
def loss_fn(logits, batch, config):
"""Compute loss for colorization task."""
labels = input_pp(batch, config)["x"]
error = logits["color"] - labels["color"]
loss = jnp.square(error)
return loss, {"loss_color": loss}
def predict_outputs(logits, config):
"""Make outputs for colorization task."""
# Map logits to (height, width, channels).
hp, wp = config.model.patch_size
hn, wn = np.array(config.model.input_size) // np.array((hp, wp))
assert ONE_HOT_AXIS == -2, "Rearrange below depends on this."
output = einops.rearrange(
logits["color"],
"b (hn wn) c (hp wp) -> b (hn hp) (wn wp) c",
hn=hn,
wn=wn,
hp=hp,
wp=wp)
output = jnp.clip(output, -1., 1.)
return {"color": output}
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contrastive training loop.
For models like
- LiT (https://arxiv.org/abs/2111.07991)
- CLIP (https://arxiv.org/abs/2103.00020)
"""
# pylint: disable=consider-using-from-import
import functools
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
from tensorflow.io import gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def all_gather(z):
"""All gather and flatten first two dims."""
gather_flat = lambda x: jnp.concatenate(jax.lax.all_gather(x, "batch"), 0)
return jax.tree_map(gather_flat, z)
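# Aside (illustrative): with per-device batch b on D devices, calling
# all_gather on (zimg, ztxt) inside a pmapped step turns [b, d] embeddings
# into [D*b, d] on every device, so the contrastive loss sees D*b - 1
# negatives per example instead of b - 1.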
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info( # pylint: disable=logging-fstring-interpolation
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
save_ckpt_path = None
if workdir: # Always create if requested, even if we may not write into it.
gfile.makedirs(workdir)
save_ckpt_path = os.path.join(workdir, "checkpoint.npz")
  # The pool is used to perform misc operations, such as logging, in an
  # async way.
pool = multiprocessing.pool.ThreadPool()
  # Here we register preprocessing ops from modules listed in `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image", "ops_text"]):
importlib.import_module(f"big_vision.pp.{m}")
  # This seed makes the Jax part of things (like model init) deterministic.
  # However, full training still won't be deterministic, for example because
  # the tf.data pipeline is not deterministic even if we set the TF seed.
  # See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
  # These functions do more stuff internally; for the OSS release we mock
  # them with trivial alternatives to minimize disruptions in the code.
  xid, wid = -1, -1
  fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
write_note("Initializing...")
batch_size = config.input.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir, config)
write_note("Initializing train dataset...")
train_ds, ntrain_img = input_pipeline.training(config.input)
# Start prefetching already.
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
total_steps = u.steps("total", config, ntrain_img, batch_size)
def get_steps(name, default=ValueError, cfg=config):
return u.steps(name, cfg, ntrain_img, batch_size, total_steps, default)
u.chrono.inform(total_steps=total_steps, global_bs=batch_size,
steps_per_epoch=ntrain_img / batch_size,
measure=mw.measure, write_note=write_note)
info("Running for %d steps, that means %f epochs",
total_steps, total_steps * batch_size / ntrain_img)
write_note(f"Initializing {config.model_name} model...")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = model_mod.Model(**config.get("model", {}))
# We want all parameters to be created in host RAM, not on any device, they'll
# be sent there later as needed, otherwise we already encountered two
# situations where we allocate them twice.
@functools.partial(jax.jit, backend="cpu")
def init(rng):
bs = batch_size // jax.device_count()
image_size = tuple(train_ds.element_spec["image"].shape[1:])
no_image = jnp.zeros((bs,) + image_size, jnp.float32)
text_size = tuple(train_ds.element_spec["labels"].shape[1:])
no_text = jnp.zeros((bs,) + text_size, jnp.int32)
params = flax.core.unfreeze(model.init(rng, no_image, no_text))["params"]
return params
rng, rng_init = jax.random.split(rng)
with u.chrono.log_timing("z/secs/init"):
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
total_steps=total_steps, batch_size=batch_size, data_size=ntrain_img))
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@functools.partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1))
def update_fn(params, opt, rng, batch):
"""Update step."""
assert "mixup" not in config, "We still have to figure out mixup."
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index("batch"))
def loss_fn(params, images, labels):
zimg, ztxt, extras = model.apply(
{"params": params}, images, labels,
train=True, rngs={"dropout": rng_model_local})
      # Gather representations across cores so the loss is computed over a
      # larger effective batch.
if config.get("loss_use_global_batch", False):
zimg, ztxt = all_gather((zimg, ztxt))
l, l_extras = u.bidirectional_contrastive_loss(
zimg, ztxt, extras["t"], reduction=True)
return l, {
"t": extras["t"],
"t/parameter": extras["t/parameter"],
"nimg": jnp.mean(extras["img/norm"]),
"ntxt": jnp.mean(extras["txt/norm"]),
**l_extras,
}
(l, measurements), grads = jax.value_and_grad(
loss_fn, has_aux=True)(params, batch["image"], batch["labels"])
l, measurements, grads = jax.lax.pmean((l, measurements, grads),
axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum([jnp.vdot(g, g) for g in gs]))
ps = jax.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum([jnp.vdot(p, p) for p in ps]))
us = jax.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum([jnp.vdot(u, u) for u in us]))
return params, opt, rng, l, measurements
  # We require a hashable function reference for the evaluator.
# We do not jit/pmap this function, because it is passed to evaluator that
# does it later. We output as many intermediate tensors as possible for
# maximal flexibility. Later `jit` will prune out things that are not needed.
def predict_fn(params, image=None, text=None, **unused_kwargs):
del unused_kwargs # `unused_kwargs` is to be compatible with few-shot
zimg, ztxt, out = model.apply({"params": params}, image, text)
return zimg, ztxt, out
# Only initialize evaluators when they are first needed.
@functools.lru_cache(maxsize=None)
def evaluators():
return eval_common.from_config(
config, {"predict": predict_fn},
lambda s: write_note(f"Init evaluator: {s}…\n{u.chrono.note}"),
lambda key, cfg: get_steps(key, default=None, cfg=cfg),
)
  # Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
  # 4. Train from scratch.
resume_ckpt_path = None
if save_ckpt_path and gfile.exists(save_ckpt_path):
resume_ckpt_path = save_ckpt_path
elif config.get("resume"):
    resume_ckpt_path = fillin(config.resume)
if resume_ckpt_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"opt": opt_cpu,
"chrono": u.chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_ckpt_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu, opt_cpu = checkpoint["params"], checkpoint["opt"]
u.chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.get("model"),
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
u.chrono.inform(first_step=first_step)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{u.chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
ckpt_writer = None
write_note(f"First step compilations...\n{u.chrono.note}")
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, batch in zip(range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
with u.chrono.log_timing("z/secs/update0", noop=step > first_step + 1):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop, batch)
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, get_steps("log_training"))
# Report training progress
if (u.itstime(step, get_steps("log_training"), total_steps, host=0)
or u.chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
u.chrono.tick(step)
if not np.isfinite(l):
raise RuntimeError(f"The loss became nan or inf somewhere within steps "
f"[{step - get_steps('log_training')}, {step}]")
# Checkpoint saving
if (save_ckpt_path and
(u.itstime(step, get_steps("ckpt", None), total_steps, host=0) or
u.itstime(step, get_steps("keep_ckpt", None), total_steps, host=0))):
u.chrono.pause(wait_for=(params_repl, opt_repl))
u.checkpointing_timeout(ckpt_writer, config.get("ckpt_timeout", 1))
      # We need to transfer the weights over now, or else we risk keeping them
      # alive while they'll be updated in a future step, creating hard-to-debug
      # memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu = jax.tree_map(lambda x: np.array(x[0]), params_repl)
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, get_steps("keep_ckpt", None), total_steps):
copy_step = step
ckpt = {"params": params_cpu, "opt": opt_cpu, "chrono": u.chrono.save()}
ckpt_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_ckpt_path, copy_step))
u.chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators():
if u.itstime(step, log_steps, total_steps, first=log_steps < total_steps,
last=False):
u.chrono.pause(wait_for=params_repl)
u.chrono.tick(step) # Record things like epoch number, core hours etc.
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
u.chrono.resume()
mw.step_end()
  # Run evals after training is done. Running them here guarantees that evals
  # will run if the job is restarted after writing the last checkpoint, and
  # also supports eval-only runs (when total_steps or num_epochs is 0).
mw.step_start(total_steps)
for (name, evaluator, _, prefix) in evaluators():
write_note(f"{name} evaluation...\n{u.chrono.note}")
with u.chrono.log_timing(f"z/secs/eval/{name}"):
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
write_note(f"Done!\n{u.chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync()
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file provides a JAX implementation of GSAM."""
import jax
import jax.numpy as jnp
def dual_vector(y):
"""Returns the solution of max_x y^T x s.t. ||x||_2 <= 1.
Args:
y: A pytree of numpy ndarray, vector y in the equation above.
"""
gradient_norm = jnp.sqrt(sum(
jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)))
normalized_gradient = jax.tree_map(lambda x: x / gradient_norm, y)
return normalized_gradient, gradient_norm
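# A worked example of `dual_vector` (hypothetical values, comments only):
# for y = {"w": jnp.array([3., 4.])}, the gradient norm is 5.0 and the
# normalized gradient is {"w": [0.6, 0.8]}, i.e. the maximizer of y^T x over
# the L2 unit ball, returned together with ||y||_2.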
def gsam_gradient(loss_fn, params, inputs, targets,
rho_max, rho_min, alpha, lr, lr_max, lr_min, eps=1e-12,
adaptive_perturbation=False, minimize_fp=True):
"""
Get the GSAM gradient (https://openreview.net/pdf?id=edONMAnhLu-).
Args:
loss_fn: the loss function.
params: the model weights.
inputs: the inputs to the loss function.
targets: the targets to the loss function.
rho_max: the maximum rho value for perturbation of weights.
rho_min: the minimum rho value for perturbation of weights.
alpha: the alpha value for the rho schedule, see Algorithm 1 in the paper.
lr: current learning rate.
lr_max: the maximum learning rate.
lr_min: the minimum learning rate.
eps: the epsilon value for numerical stability.
    adaptive_perturbation: if False, use the same perturbation as SAM:
      treat all parameters as a single vector and compute the perturbation
      norm as the norm of that whole vector.
      If True, the perturbation norm is proportional to the parameter norm,
      which stabilizes training when different layers have weights of
      different scales.
      Empirically, setting it to True can handle a 10x larger rho than
      setting it to False.
    minimize_fp: if True, minimize (f_p, h) as in the original GSAM;
      if False, minimize (f, h), where f is the clean loss, f_p is the
      perturbed loss, and h is the surrogate gap.
      If True, the training dynamics are closer to SAM than to conventional
      training, and you might observe several loss spikes.
      If False, the training dynamics are closer to conventional training,
      and are often more stable (fewer loss spikes).
Returns:
l_clean: the loss function value.
    g_gsam: the GSAM gradient. It is not averaged across workers; call
      `jax.lax.pmean` to average it.
Note:
Setting `rho_max=rho_min` and `alpha=0` reduces GSAM to SAM.
"""
l_clean, g_clean = jax.value_and_grad(loss_fn)(params, inputs, targets)
g_clean_normalized, g_clean_length = dual_vector(g_clean)
if lr_max == lr_min:
sam_rho = rho_max
else:
sam_rho = rho_min + (rho_max - rho_min) * (lr - lr_min) / (lr_max - lr_min)
# Per-worker perturbation.
  if adaptive_perturbation:
    param_sam = jax.tree_map(
        lambda a, b: a + jnp.abs(a) * sam_rho * b / (g_clean_length + eps),
        params, g_clean)
  else:
    param_sam = jax.tree_map(
        lambda a, b: a + sam_rho * b / (g_clean_length + eps),
        params, g_clean)
# Get gradients at perturbed weights.
_, g_robust = jax.value_and_grad(loss_fn)(param_sam, inputs, targets)
# Decompose gradients.
g_clean_flatten, _ = jax.tree_util.tree_flatten(g_clean)
g_robust_flatten, _ = jax.tree_util.tree_flatten(g_robust)
if minimize_fp:
    # Decompose g_clean into components parallel and orthogonal to g_robust.
g_robust_normalized, _ = dual_vector(g_robust)
g_robust_normalized_flatten, _ = jax.tree_util.tree_flatten(
g_robust_normalized)
    g_clean_projection_norm = sum(
        jnp.vdot(p, q) for (p, q) in
        zip(g_robust_normalized_flatten, g_clean_flatten))
    g_clean_residual = jax.tree_map(
        lambda a, b: a - g_clean_projection_norm * b,
        g_clean, g_robust_normalized)
# Get GSAM gradient.
g_gsam = jax.tree_map(lambda a, b: a - b * alpha,
g_robust, g_clean_residual)
else:
    # Decompose g_robust into components parallel and orthogonal to g_clean.
    # (`g_clean_normalized` and `g_clean_length` were already computed above.)
    g_clean_normalized_flatten, _ = jax.tree_util.tree_flatten(
        g_clean_normalized)
    g_robust_projection_norm = sum(
        jnp.vdot(p, q) for (p, q) in
        zip(g_clean_normalized_flatten, g_robust_flatten))
    g_robust_residual = jax.tree_map(
        lambda a, b: a - g_robust_projection_norm * b,
        g_robust, g_clean_normalized)
# Get GSAM gradient.
g_gsam = jax.tree_map(lambda a, b: a + b * alpha,
g_clean, g_robust_residual)
# Always return the clean loss (rather than the perturbed loss).
return l_clean, g_gsam
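# A minimal usage sketch of `gsam_gradient` inside a pmapped update step.
# The rho/alpha/lr values below are hypothetical placeholders, not
# recommendations:
#
#   l, g_gsam = gsam_gradient(
#       loss_fn, params, inputs, targets,
#       rho_max=0.04, rho_min=0.02, alpha=0.4,
#       lr=lr, lr_max=3e-3, lr_min=0.0)
#   l, g_gsam = jax.lax.pmean((l, g_gsam), axis_name="batch")
#
# As noted above, rho_max == rho_min together with alpha == 0 recovers SAM.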
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop example.
Trainer that implements SAM/GSAM optimizers.
"""
# pylint: disable=consider-using-from-import
from functools import partial
import importlib
import multiprocessing.pool
import os
from absl import app
from absl import flags
from absl import logging
import big_vision.evaluators.common as eval_common
import big_vision.input_pipeline as input_pipeline
import big_vision.optax as bv_optax
import big_vision.pp.builder as pp_builder
from big_vision.trainers.proj.gsam.gsam import gsam_gradient
import big_vision.utils as u
from clu import parameter_overview
import flax
import jax
import jax.numpy as jnp
from ml_collections import config_flags
import numpy as np
import optax
import tensorflow as tf
import tensorflow.io.gfile as gfile
# pylint: disable=logging-fstring-interpolation
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", default=None, help="Work unit directory.")
flags.DEFINE_boolean("cleanup", default=False,
help="Delete workdir (only) after successful completion.")
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
config = flags.FLAGS.config
workdir = flags.FLAGS.workdir
logging.info(
f"\u001b[33mHello from process {jax.process_index()} holding "
f"{jax.local_device_count()}/{jax.device_count()} devices and "
f"writing to workdir {workdir}.\u001b[0m")
assert not config.get("grad_accum_steps"), "Grad-acc not supported anymore."
save_checkpoint_path = None
if workdir and config.get("checkpoint_steps"):
gfile.makedirs(workdir)
save_checkpoint_path = os.path.join(workdir, "checkpoint.npz")
# The pool is used to perform misc operations such as logging in async way.
pool = multiprocessing.pool.ThreadPool()
# Here we register preprocessing ops from modules listed on `pp_modules`.
for m in config.get("pp_modules", ["ops_general", "ops_image"]):
importlib.import_module(f"big_vision.pp.{m}")
# This seed makes the Jax part of things (like model init) deterministic.
# However, full training still won't be deterministic, for example due to the
  # tf.data pipeline not being deterministic even if we set the TF seed.
# See (internal link) for a fun read on what it takes.
rng = jax.random.PRNGKey(config.get("seed", 0))
  # These functions do more stuff internally; for the OSS release we mock them
  # with trivial alternatives in order to minimize disruptions in the code.
xid, wid = -1, -1
fillin = lambda s: s
def info(s, *a):
logging.info("\u001b[33mNOTE\u001b[0m: " + s, *a)
def write_note(note):
if jax.process_index() == 0:
info("%s", note)
# Verify settings to make sure no checkpoints are accidentally missed.
if config.get("keep_checkpoint_steps"):
assert config.get("checkpoint_steps"), "Specify `checkpoint_steps`."
    assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (
        f"`keep_checkpoint_steps` ({config.keep_checkpoint_steps}) should be "
        f"divisible by `checkpoint_steps` ({config.checkpoint_steps}).")
batch_size = config.batch_size
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must "
f"be divisible by device number ({jax.device_count()})")
info("Global batch size %d on %d hosts results in %d local batch size. With "
"%d dev per host (%d dev total), that's a %d per-device batch size.",
batch_size, jax.process_count(), batch_size // jax.process_count(),
jax.local_device_count(), jax.device_count(),
batch_size // jax.device_count())
# First thing after above sanity checks, so we can log "start" ticks.
mw = u.BigVisionMetricWriter(xid, wid, workdir)
chrono = u.Chrono()
write_note("Initializing train dataset...")
train_ds = input_pipeline.make_for_train(
dataset=config.dataset,
split=config.train_split,
batch_size=config.batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(config.pp_train),
shuffle_buffer_size=config.get("shuffle_buffer_size"),
cache_raw=config.get("cache_raw", False),
data_dir=fillin(config.get("dataset_dir")))
n_prefetch = config.get("prefetch_to_device", 1)
train_iter = input_pipeline.start_input_pipeline(train_ds, n_prefetch)
ntrain_img = input_pipeline.get_num_examples(
config.dataset, config.train_split,
data_dir=fillin(config.get("dataset_dir")))
steps_per_epoch = ntrain_img / batch_size
if config.get("num_epochs"):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get("total_steps"), "Set either num_epochs or total_steps"
else:
total_steps = config.total_steps
info("Running for %d steps, that means %f epochs and %f steps per epoch",
total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)
write_note(f"Initializing {config.model_name} model...")
model_mod = importlib.import_module(f"big_vision.models.{config.model_name}")
model = model_mod.Model(
num_classes=config.num_classes, **config.get("model", {}))
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent there later as needed. Otherwise, we have already encountered
  # two situations where we allocate them twice.
@partial(jax.jit, backend="cpu")
def init(rng):
shape = tuple(train_ds.element_spec["image"].shape[1:])
bs = config.batch_size // jax.device_count()
dummy_input = jnp.zeros((bs,) + shape, jnp.float32)
params = flax.core.unfreeze(model.init(rng, dummy_input))["params"]
# Set bias in the head to a low value, such that loss is small initially.
if "init_head_bias" in config:
params["head"]["bias"] = jnp.full_like(params["head"]["bias"],
config["init_head_bias"])
return params
rng, rng_init = jax.random.split(rng)
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_util.tree_leaves(params_cpu))
parameter_overview.log_parameter_overview(params_cpu, msg="init params")
mw.measure("num_params", num_params)
write_note(f"Initializing {config.optax_name} optimizer...")
tx, sched_fns = bv_optax.make(config, params_cpu, sched_kw=dict(
global_batch_size=batch_size,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch))
assert len(sched_fns) == 1, "Current GSAM supports one global learning-rate."
# We jit this, such that the arrays are created on the CPU, not device[0].
opt_cpu = jax.jit(tx.init, backend="cpu")(params_cpu)
sched_fns_cpu = [jax.jit(sched_fn, backend="cpu") for sched_fn in sched_fns]
@partial(jax.pmap, axis_name="batch", donate_argnums=(0, 1))
def update_fn(params, opt, rng, images, labels, step):
"""Update step."""
measurements = {}
if config.get("mixup") and config.mixup.p:
rng, (images, labels), _ = u.mixup(rng, images, labels, **config.mixup)
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index("batch"))
def loss_fn(params, images, labels):
logits, _ = model.apply(
{"params": flax.core.freeze(params)}, images,
train=True, rngs={"dropout": rng_model_local})
return getattr(u, config.get("loss", "sigmoid_xent"))(
logits=logits, labels=labels)
learning_rate = sched_fns[0](step) * config.lr
l, grads = gsam_gradient(loss_fn=loss_fn, params=params, inputs=images,
targets=labels, lr=learning_rate, **config.gsam)
l, grads = jax.lax.pmean((l, grads), axis_name="batch")
updates, opt = tx.update(grads, opt, params)
params = optax.apply_updates(params, updates)
gs = jax.tree_leaves(bv_optax.replace_frozen(config.schedule, grads, 0.))
measurements["l2_grads"] = jnp.sqrt(sum(jnp.vdot(g, g) for g in gs))
ps = jax.tree_util.tree_leaves(params)
measurements["l2_params"] = jnp.sqrt(sum(jnp.vdot(p, p) for p in ps))
us = jax.tree_util.tree_leaves(updates)
measurements["l2_updates"] = jnp.sqrt(sum(jnp.vdot(u, u) for u in us))
return params, opt, rng, l, measurements
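  # A `config.gsam` sketch consumed by the `**config.gsam` call above (the
  # values are hypothetical placeholders; see the GSAM paper for tuned
  # settings):
  #
  #   config.gsam = dict(rho_max=0.04, rho_min=0.02, alpha=0.4,
  #                      lr_max=3e-3, lr_min=0.0)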
  # We do not jit/pmap this function, because it is passed to the evaluator,
  # which does that later. We output as many intermediate tensors as possible
  # for maximal flexibility. Later, `jit` will prune out things that are not
  # needed.
def predict_fn(params, image):
logits, out = model.apply({"params": params}, image)
return logits, out
# Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. resume a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize the model from something, e.g. start a fine-tuning job.
  # 4. Train from scratch.
resume_checkpoint_path = None
if save_checkpoint_path and gfile.exists(save_checkpoint_path):
resume_checkpoint_path = save_checkpoint_path
elif config.get("resume"):
resume_checkpoint_path = fillin(config.resume)
if resume_checkpoint_path:
write_note("Resume training from checkpoint...")
checkpoint = {
"params": params_cpu,
"opt": opt_cpu,
"chrono": chrono.save(),
}
checkpoint_tree = jax.tree_structure(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_checkpoint_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
params_cpu, opt_cpu = checkpoint["params"], checkpoint["opt"]
chrono.load(checkpoint["chrono"])
elif config.get("model_init"):
write_note(f"Initialize model from {config.model_init}...")
params_cpu = model_mod.load(
params_cpu, config.model_init, config.get("model"),
**config.get("model_load", {}))
if jax.process_index() == 0:
parameter_overview.log_parameter_overview(
params_cpu, msg="restored params")
write_note("Kicking off misc stuff...")
first_step = bv_optax.get_count(opt_cpu)
chrono.inform(first_step, total_steps, batch_size, steps_per_epoch)
prof = None # Keeps track of start/stop of profiler state.
write_note(f"Replicating...\n{chrono.note}")
params_repl = flax.jax_utils.replicate(params_cpu)
opt_repl = flax.jax_utils.replicate(opt_cpu)
evaluators = eval_common.from_config(
config, {"predict": predict_fn},
lambda s: write_note(f"Initializing evaluator: {s}...\n{chrono.note}"))
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax.jax_utils.replicate(rng_loop)
checkpoint_writer = None
write_note(f"First step compilations...\n{chrono.note}")
error = None # For exiting with an error after cleanup. Avoids indentation.
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, train_batch in zip(
range(first_step + 1, total_steps + 1), train_iter):
mw.step_start(step)
with jax.profiler.StepTraceAnnotation("train_step", step_num=step):
params_repl, opt_repl, rngs_loop, loss_value, measurements = update_fn(
params_repl, opt_repl, rngs_loop,
train_batch["image"],
train_batch["labels"],
flax.jax_utils.replicate(step))
# On the first host, let's always profile a handful of early steps.
if jax.process_index() == 0:
prof = u.startstop_prof(prof, step, first_step, config.log_training_steps)
# Report training progress
if (u.itstime(step, config.log_training_steps, total_steps, host=0)
or chrono.warmup and jax.process_index() == 0):
for i, sched_fn_cpu in enumerate(sched_fns_cpu):
mw.measure(f"global_schedule{i if i else ''}", sched_fn_cpu(step - 1))
l = mw.measure("training_loss", loss_value[0])
for name, value in measurements.items():
mw.measure(name, value[0])
chrono.tick(step, mw.measure, write_note)
if not np.isfinite(l):
error = (f"The loss became nan or inf somewhere within steps "
f"[{step - config.log_training_steps}, {step}]")
break
# Checkpoint saving
if (save_checkpoint_path and
u.itstime(step, config.get("checkpoint_steps"), total_steps, host=0)):
chrono.pause(wait_for=(params_repl, opt_repl))
u.checkpointing_timeout(checkpoint_writer,
config.get("checkpoint_timeout", 1))
      # We need to transfer the weights over now, or else we risk keeping them
      # alive while they'll be updated in a future step, creating hard-to-debug
      # memory errors (see (internal link)). Also, takes device 0's params only.
params_cpu = jax.tree_map(lambda x: np.array(x[0]), params_repl)
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, config.get("keep_checkpoint_steps"), total_steps):
copy_step = step
ckpt = {"params": params_cpu, "opt": opt_cpu, "chrono": chrono.save()}
checkpoint_writer = pool.apply_async(
u.save_checkpoint, (ckpt, save_checkpoint_path, copy_step))
chrono.resume()
for (name, evaluator, log_steps, prefix) in evaluators:
if u.itstime(step, log_steps, total_steps):
chrono.pause(wait_for=params_repl)
write_note(f"{name} evaluation...\n{chrono.note}")
for key, value in evaluator.run(params_repl):
mw.measure(f"{prefix}{key}", value)
chrono.resume()
mw.step_end()
# Always give a chance to stop the profiler, no matter how things ended.
# TODO: can we also do this when dying of an exception like OOM?
if jax.process_index() == 0 and prof is not None:
u.startstop_prof(prof)
# Last note needs to happen before the pool's closed =)
if not error:
write_note(f"Done!\n{chrono.note}")
else:
write_note(f"Failed!\n{error}\n{chrono.note}")
pool.close()
pool.join()
mw.close()
# Make sure all hosts stay up until the end of main.
u.sync_all_hosts()
# Before cleanup, as cleanup should only run for successful jobs.
if error is not None:
raise RuntimeError(error)
u.maybe_cleanup_workdir(workdir, flags.FLAGS.cleanup, info)
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator for the classfication task."""
# pylint: disable=consider-using-from-import
from functools import partial, lru_cache
import big_vision.datasets.core as ds_core
import big_vision.input_pipeline as input_pipeline
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import jax
import jax.numpy as jnp
import numpy as np
# To avoid re-compiling the function for every new instance of the same
# evaluator on a different dataset!
@lru_cache(None)
def get_eval_fn(predict_fn, loss_name):
"""Produces eval function, also applies pmap."""
@partial(jax.pmap, axis_name='batch')
def _eval_fn(params, batch, labels, mask):
logits, *_ = predict_fn(params, **batch)
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
losses = getattr(u, loss_name)(
logits=logits, labels=labels, reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
top1_correct = jnp.take_along_axis(
labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
return ncorrect, loss, n
return _eval_fn
class Evaluator:
"""Classification evaluator."""
def __init__(self, predict_fn, data, pp_fn, batch_size, loss_name,
cache_final=True, cache_raw=False, prefetch=1,
label_key='labels'):
data = ds_core.get(**data)
pp_fn = pp_builder.get_preprocess_fn(pp_fn)
self.ds, self.steps = input_pipeline.make_for_inference(
data.get_tfdata(ordered=True), pp_fn, batch_size,
num_ex_per_process=data.num_examples_per_process(),
cache_final=cache_final, cache_raw=cache_raw)
self.data_iter = input_pipeline.start_input_pipeline(self.ds, prefetch)
self.eval_fn = get_eval_fn(predict_fn, loss_name)
self.label_key = label_key
def run(self, params):
"""Computes all metrics."""
ncorrect, loss, nseen = 0, 0, 0
for _, batch in zip(range(self.steps), self.data_iter):
labels, mask = batch.pop(self.label_key), batch.pop('_mask')
batch_ncorrect, batch_losses, batch_n = self.eval_fn(
params, batch, labels, mask)
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
yield ('prec@1', ncorrect / nseen)
yield ('loss', loss / nseen)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for few-shot evaluation."""
# pylint: disable=consider-using-from-import
import functools
import big_vision.datasets.core as ds_core
import big_vision.input_pipeline as input_pipeline
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import jax
import jax.numpy as jnp
import numpy as np
BIAS_CONSTANT = 100.0
# Setup function for few-shot regression on CPU to avoid "polluting" the TPU.
@functools.partial(jax.jit, backend="cpu", static_argnums=(2,))
def _precompute_cache(x, y, num_classes):
"""Cache quantities to speed-up the computation of L2-regularized least-sq."""
# Whiten
mean = jnp.mean(x, axis=0, keepdims=True)
std = jnp.std(x, axis=0, keepdims=True) + 1e-5
x = (x - mean) / std
# Add a constant feature for the bias, large so it's almost unregularized:
x = jnp.pad(x, ((0, 0), (0, 1)), constant_values=BIAS_CONSTANT)
# To one-hot representation rescaled into {-1, 1}
y = 2.0 * jax.nn.one_hot(y, num_classes) - 1.0
num_points, dim = x.shape
# Let N be the number of points, D the dimension and C the number of classes.
# We have x of shape (N, D) and y of shape (N, C).
# For least-squares, we can compute
#
# (A) when N >= D, (x^T x + l2 Id)^{-1} x^T y
# (B) when D > N, x^T (x x^T + l2 Id)^{-1} y
#
# We pre-compute the eigen-decomposition of either x^T x or x x^T which
# becomes q diag(eigs) q^T with q unitary matrix either (D, D) or (N, N)
# and eigs a vector (D,) or (N,).
#
# For any l2 > 0, we can compute (x^T x + l2 Id)^{-1} or (x x^T + l2 Id)^{-1}
# by simply computing q (diag(eigs) + l2 Id)^{-1} q^T.
# (SVD would be more natural here, but it proved slower, so we use eigh)
#
# Both cases (A) and (B) can be viewed as lhs (diag(eigs) + l2 Id)^{-1} rhs,
# where lhs/rhs are pre-computed left/right-hand sides to specify.
#
# Detailed evaluation in terms of time and fewshot metrics can be found in
# (internal link)
#
# Implemented by Rodolphe Jenatton.
if num_points >= dim:
eigs, q = jnp.linalg.eigh(x.T @ x)
rhs = q.T @ (x.T @ y)
lhs = q
else:
eigs, q = jnp.linalg.eigh(x @ x.T)
rhs = q.T @ y
lhs = x.T @ q
cache = {
"eigs": eigs,
"rhs": rhs,
"lhs": lhs,
"mean": mean,
"std": std
}
return cache
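# Sanity-check sketch (not part of the pipeline): for the whitened,
# bias-padded x above and any l2 > 0, case (A) is equivalent to the direct
# ridge-regression solve
#
#   w = jnp.linalg.solve(x.T @ x + l2 * jnp.eye(dim), x.T @ y)
#
# while the cached form below computes w = (lhs * 1 / (eigs + l2)) @ rhs.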
@functools.partial(jax.jit, backend="cpu")
def _eig_fewshot_acc_fn(cache, x_test, y_test, l2_reg):
"""Computes (x,y) linear regression accuracy on (x_test, y_test)."""
x_test = (x_test - cache["mean"]) / cache["std"]
x_test = jnp.pad(x_test, ((0, 0), (0, 1)), constant_values=BIAS_CONSTANT)
rhs = cache["rhs"]
lhs = cache["lhs"]
eigs = cache["eigs"]
# See comments in _precompute_cache for context about the formula.
scaling = 1.0 / (eigs + l2_reg * jnp.ones_like(eigs))
scaling = scaling.reshape((1, -1))
w = (lhs * scaling) @ rhs
# Predict test-set values and measure their accuracy
preds = jnp.argmax(x_test @ w, axis=1)
return jnp.mean(preds == y_test)
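# A usage sketch of the two functions together (shapes are hypothetical):
# given train features x of shape (N, D) with integer labels y, plus test
# features/labels with the same feature dimension:
#
#   cache = _precompute_cache(x, y, num_classes)
#   acc = _eig_fewshot_acc_fn(cache, x_test, y_test, l2_reg)
#
# Re-using `cache` across several l2_reg values amortizes the eigh cost.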
class Evaluator:
"""Class for few-shot evaluation."""
def __init__(self, predict_fn, batch_size,
representation_layer, datasets, shots, l2_reg,
pp_train, pp_eval, display_first,
num_seeds=3,
label_key="label", mask_key="_mask"):
self.repr_fn = self.get_representation_fn(
predict_fn, representation_layer)
self.datasets = datasets
self.shots = shots
self.l2_reg = l2_reg
self.batch_size = batch_size
self.pp_tr = pp_train
self.pp_te = pp_eval
self.display_first = display_first
self._datasets = {} # Cache for tfds data. Persists while object is alive.
self._repr = {} # Cache for precomputed repr. Persists within the run call.
self.num_seeds = num_seeds
self.label_key = label_key
self.mask_key = mask_key
def get_representation_fn(self, predict_fn, representation_layer):
@functools.partial(jax.pmap, axis_name="batch")
def _repr_fn(params, batch, labels, mask):
*_, out = predict_fn(params, **batch)
rep = u.tree_get(out, representation_layer)
rep = jax.lax.all_gather(rep, "batch")
labels = jax.lax.all_gather(labels, "batch")
mask = jax.lax.all_gather(mask, "batch")
return rep, labels, mask
return _repr_fn
# Setup input pipeline.
def _get_dataset(self, dataset, train_split, test_split):
"""Lazy-loads given dataset."""
key = (dataset, train_split, test_split)
try:
return self._datasets[key]
except KeyError:
      # NOTE: only supporting TFDS data for now, for bwd compat/laziness.
train_data = ds_core.get(name=dataset, split=train_split)
train_ds, batches_tr = input_pipeline.make_for_inference(
train_data.get_tfdata(ordered=True),
num_ex_per_process=train_data.num_examples_per_process(),
batch_size=self.batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(self.pp_tr))
test_data = ds_core.get(name=dataset, split=test_split)
test_ds, batches_te = input_pipeline.make_for_inference(
test_data.get_tfdata(ordered=True),
num_ex_per_process=test_data.num_examples_per_process(),
batch_size=self.batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(self.pp_te))
num_classes = train_data.builder.info.features["label"].num_classes
return self._datasets.setdefault(
key, (train_ds, batches_tr, test_ds, batches_te, num_classes))
def _get_repr(self, params, data, steps):
"""Compute representation for the whole dataset."""
pre_logits_list = []
labels_list = []
for batch, _ in zip(input_pipeline.start_input_pipeline(data, 0),
range(steps)):
labels, mask = batch.pop(self.label_key), batch.pop(self.mask_key)
pre_logits, labels, mask = self.repr_fn(
params, batch, labels, mask)
# Shapes at this point are:
# pre_logits: (hosts, devices, global_batch, features)
# labels: (hosts, devices, global_batch)
# mask: (hosts, devices, global_batch)
mask = np.array(mask[0]).astype(bool)
pre_logits_list.append(np.array(pre_logits[0])[mask])
labels_list.append(np.array(labels[0])[mask])
pre_logits = np.concatenate(pre_logits_list, axis=0)
labels = np.concatenate(labels_list, axis=0)
return pre_logits, labels
def compute_fewshot_metrics(self, params, seed,
dataset, train_split, test_split):
"""Compute few-shot metrics on one dataset."""
if dataset in self._repr:
repr_train, labels_train, repr_test, labels_test, num_classes = (
self._repr[dataset])
else:
train_ds, steps_tr, test_ds, steps_te, num_classes = self._get_dataset(
dataset, train_split, test_split)
repr_train, labels_train = self._get_repr(params, train_ds, steps_tr)
repr_test, labels_test = self._get_repr(params, test_ds, steps_te)
self._repr[dataset] = (repr_train, labels_train,
repr_test, labels_test,
num_classes)
# Collect where we have samples of which classes.
rng = np.random.default_rng(seed)
class_indices = [rng.permutation(np.where(labels_train == cls_i)[0])
for cls_i in range(num_classes)]
results = {}
for shots in self.shots:
all_idx = [indices[:shots] for indices in class_indices]
all_idx = np.concatenate(all_idx, axis=0)
x = repr_train[all_idx]
y = labels_train[all_idx]
      # Note: the code is optimized to solve multiple least-squares tasks for
      # varying l2 strength, even though we currently use the fixed l2_reg
      # constant.
cache = _precompute_cache(x, y, num_classes)
acc = _eig_fewshot_acc_fn(cache, repr_test, labels_test, self.l2_reg)
results[shots] = np.array(acc)
return results
def run(self, params):
"""New API executed in terms of old API."""
self._repr = {}
for seed in range(self.num_seeds):
for name, dataset_args in self.datasets.items():
result = self.compute_fewshot_metrics(params, seed, *dataset_args)
for shots, v in result.items():
prefix = "a/" if (name, shots) in self.display_first else "z/"
suffix = f"-seed-{seed}"
yield f"{prefix}{name}_{shots}shot{suffix}", v
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator for computing mean of per-example metrics.
This evaluator can be used in two ways:
1. Create a new evaluator with reduced boilerplate by inheriting from it.
2. For quick prototyping, use this with predict_fns which return the metrics.
"""
from functools import partial
from typing import Mapping
from big_vision import input_pipeline
from big_vision.datasets import core as ds_core
from big_vision.pp import builder as pp_builder
import jax
import jax.numpy as jnp
import numpy as np
# Note: global to avoid jax re-compiling across different evaluator instances.
@partial(jax.pmap, static_broadcasted_argnums=0, axis_name='batch')
def _run_predict_fn(predict_fn, params, batch):
"""Sum per-example metrics weighted by `_mask`."""
mask = batch['_mask']
metrics = predict_fn(params, batch)
# Sanity check output format of predict_fn.
assert isinstance(metrics, Mapping), 'predict_fn must return a dict'
for y in jax.tree_leaves(metrics):
if y.shape != mask.shape:
raise ValueError(
f'Expected per-example metrics of shape {mask.shape} found '
f'{jax.tree_map(lambda x: x.shape, metrics)}.')
metrics = {**metrics, '_mask': mask}
metrics = jax.tree_map(lambda x: jnp.sum(jnp.where(mask, x, 0)), metrics)
return jax.lax.psum(metrics, axis_name='batch')
class Evaluator:
"""Report the mean of per-example metrics computed by predict_fn.
`predict_fn(params, batch)` must return a dict from metric name to
per-example metrics of shape [batch_size].
"""
def __init__(self, predict_fn, data, pp_fn, batch_size,
cache_final=True, cache_raw=False, prefetch=1):
data = ds_core.get(**data)
self.dataset, self.steps = input_pipeline.make_for_inference(
data.get_tfdata(ordered=True), batch_size=batch_size,
num_ex_per_process=data.num_examples_per_process(),
preprocess_fn=pp_builder.get_preprocess_fn(pp_fn),
cache_final=cache_final, cache_raw=cache_raw)
self.data_iter = input_pipeline.start_input_pipeline(self.dataset, prefetch)
self.predict_fn = partial(_run_predict_fn, predict_fn)
def run(self, params):
"""Computes all metrics."""
metrics = []
# Compute batch metrics without blocking.
for _, batch in zip(range(self.steps), self.data_iter):
batch_metrics = self.predict_fn(params, batch)
metrics.append(batch_metrics)
# Transfer metrics from device 0 to host (blocking).
metrics = jax.device_get(jax.tree_map(lambda x: x[0], metrics))
metrics_sum = jax.tree_map(lambda *x: np.sum(x), *metrics)
mask_sum = metrics_sum.pop('_mask')
for key, value_sum in metrics_sum.items():
yield (key, value_sum / mask_sum)
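# A minimal `predict_fn` sketch compatible with this evaluator (`model` and
# the use of optax here are hypothetical placeholders; only the contract of
# returning one scalar per example matters):
#
#   def predict_fn(params, batch):
#     logits = model.apply({'params': params}, batch['image'])
#     # One scalar per example, shape [batch_size].
#     return {'xent': optax.softmax_cross_entropy(logits, batch['labels'])}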
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for evaluators in general."""
import dataclasses
import functools
import importlib
from typing import Any, Callable
import flax
def from_config(config, predict_fns,
write_note=lambda s: s,
get_steps=lambda key, cfg: cfg[f"{key}_steps"]):
"""Creates a list of evaluators based on `config`."""
evaluators = []
specs = config.get("evals", {})
for name, cfg in specs.items():
write_note(name)
# Pop all generic settings off so we're left with eval's kwargs in the end.
cfg = cfg.to_dict()
module = cfg.pop("type", name)
pred_key = cfg.pop("pred", "predict")
pred_kw = cfg.pop("pred_kw", None)
prefix = cfg.pop("prefix", f"{name}/")
logsteps = get_steps("log", cfg)
for typ in ("steps", "epochs", "examples", "percent"):
cfg.pop(f"log_{typ}", None)
# Use same batch_size as eval by default, to reduce fragmentation.
# TODO: eventually remove all the deprecated names...
cfg["batch_size"] = cfg.get("batch_size") or config.get("batch_size_eval") or config.get("input.batch_size") or config.get("batch_size") # pylint: disable=line-too-long
module = importlib.import_module(f"big_vision.evaluators.{module}")
try:
predict_fn = predict_fns[pred_key]
except KeyError as e:
raise ValueError(
f"Unknown predict_fn '{pred_key}'. Available predict_fns are:\n"
+ "\n".join(predict_fns)) from e
if pred_kw is not None:
predict_fn = _CacheablePartial(predict_fn, flax.core.freeze(pred_kw))
evaluator = module.Evaluator(predict_fn, **cfg)
evaluators.append((name, evaluator, logsteps, prefix))
return evaluators
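# A config sketch consumed by `from_config` (all field values below are
# hypothetical placeholders, not recommendations):
#
#   config.evals = dict(
#       val=dict(type="classification", pred="predict", log_steps=1000,
#                data=dict(name="imagenet2012", split="validation"),
#                pp_fn="decode|resize(224)|value_range(-1, 1)",
#                loss_name="softmax_xent"),
#   )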
@dataclasses.dataclass(frozen=True, eq=True)
class _CacheablePartial:
"""partial(fn, **kwargs) that defines hash and eq - to help with jit caches.
This is particularly common in evaluators when one has many evaluator
instances that run on difference slices of data.
Example:
```
f1 = _CacheablePartial(fn, a=1)
jax.jit(f1)(...)
jax.jit(_CacheablePartial(fn, a=1))(...) # fn won't be retraced.
del f1
jax.jit(_CacheablePartial(fn, a=1))(...) # fn will be retraced.
```
"""
fn: Callable[..., Any]
kwargs: flax.core.FrozenDict
def __call__(self, *args, **kwargs):
return functools.partial(self.fn, **self.kwargs)(*args, **kwargs)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator for the classfication task."""
from functools import partial, lru_cache
from big_vision import input_pipeline
import big_vision.datasets.core as ds_core
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import einops
import jax
import jax.numpy as jnp
import numpy as np
def dist(student, teacher, kind, feat_axis=-1,
epsilon=1e-12, t=1, ls=0.0, k=1):
"""Distance function used for distillation."""
diff = student - teacher
if kind == 'euclidean':
return jnp.sqrt(jnp.sum(diff * diff, axis=feat_axis) + epsilon)
elif kind == 'l2':
return jnp.sum(diff * diff, axis=feat_axis)
elif kind == 'hard':
pseudolabels = jnp.argmax(teacher, feat_axis)
pl = u.onehot(pseudolabels, teacher.shape[feat_axis])
if ls:
pl = (1.0 - ls) * pl + (ls / (pl.shape[-1] - 1)) * (1.0 - pl)
return u.softmax_xent(logits=student, labels=pl,
reduction=False, kl=True, axis=feat_axis)
elif kind == 'kl':
return t**2 * u.softmax_xent(
logits=student / t,
labels=jax.nn.softmax(teacher / t),
reduction=False, kl=True, axis=feat_axis)
elif kind == 'logsoftmax_euclidean':
logsoftmax_diff = (
jax.nn.log_softmax(student, axis=feat_axis) -
jax.nn.log_softmax(teacher, axis=feat_axis))
return jnp.sqrt(
jnp.sum(logsoftmax_diff * logsoftmax_diff, axis=feat_axis) + epsilon)
elif kind == 'agree':
def get_top_k(arr, k, ax):
return jax.lax.top_k(arr.swapaxes(ax, -1), k)[1].swapaxes(ax, -1)
return (get_top_k(student, k, feat_axis) ==
get_top_k(teacher, 1, feat_axis)).sum(feat_axis)
else:
assert False, f'Unknown kind of distance {kind}.'
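# A worked example of `dist` (comments only, hypothetical values): for
# student = [2., 0.] and teacher = [1., 1.] along the last axis,
# kind='l2' gives (2-1)^2 + (0-1)^2 = 2.0, and kind='euclidean' gives
# sqrt(2 + epsilon) ~= 1.41.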
@lru_cache(None)
def get_dist_fn(**kw):
return partial(dist, **kw)
# To avoid re-compiling the function for every new instance of the same
# evaluator on a different dataset!
@lru_cache(None)
def get_eval_fn(student_teacher_fwd, what, distances):
"""Produces eval function, also applies pmap."""
@partial(jax.pmap, axis_name='batch')
def _eval_fn(params, batch, mask):
(_, out_s), (_, out_t) = student_teacher_fwd(params, **batch)
repr_s = u.tree_get(out_s, what[0])
repr_t = u.tree_get(out_t, what[1])
    # Let's flatten any non-vectors (e.g. feature maps).
repr_s = einops.rearrange(repr_s, 'b ... -> b (...)')
repr_t = einops.rearrange(repr_t, 'b ... -> b (...)')
all_ds = []
    # NOTE: we gather and return all distances; if this becomes too slow, we
    # can switch to computing and returning summary stats later on.
for dist_fn in distances:
ds = dist_fn(repr_s, repr_t)
all_ds.append(jax.lax.all_gather(ds, axis_name='batch'))
all_masks = jax.lax.all_gather(mask, axis_name='batch')
return all_ds, all_masks
return _eval_fn
class Evaluator:
"""Distillation distance evaluator."""
def __init__(self, student_teacher_fwd, data, pp_fn, distances,
what=('logits', 'logits'), **data_kw):
data = ds_core.get(**data)
pp_fn = pp_builder.get_preprocess_fn(pp_fn)
prefetch = data_kw.pop('prefetch', 1)
self.ds, self.steps = input_pipeline.make_for_inference(
data.get_tfdata(ordered=True), pp_fn,
num_ex_per_process=data.num_examples_per_process(), **data_kw)
self.data_iter = input_pipeline.start_input_pipeline(self.ds, prefetch)
dist_fns = tuple(get_dist_fn(**dist) for dist in distances)
self.dist_names = [
'_'.join(f'{k}={v}' for k, v in dist.items()) for dist in distances]
self.eval_fn = get_eval_fn(student_teacher_fwd, what, dist_fns)
def run(self, params):
"""Computes all metrics."""
all_ds = [[] for _ in self.dist_names]
for _, batch in zip(range(self.steps), self.data_iter):
mask = batch.pop('_mask')
batch_ds, batch_ms = self.eval_fn(params, batch, mask)
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical.
# So let's just take the first one to the host as numpy.
batch_ms = np.array(batch_ms[0]).flatten()
for i, val in enumerate(batch_ds):
all_ds[i].append(np.array(val[0]).flatten()[batch_ms == 1])
for name, ds in zip(self.dist_names, all_ds):
ds = np.concatenate(ds)
yield f'{name}/all', ds
yield f'{name}/avg', np.mean(ds)
yield f'{name}/min', np.min(ds)
yield f'{name}/max', np.max(ds)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation producing ColTran FID-5K metric."""
import functools
import os
from absl import logging
import einops
import jax
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_gan as tfgan
import tensorflow_hub as tfhub
from tensorflow.io import gfile
ROOT = os.environ.get("FID_DATA_DIR", ".")
def _preprocess(image, resolution=512):
"""ColTran dataset preprocessing.
  See
github.com/google-research/google-research/blob/master/coltran/datasets.py#L44
Args:
image: ImageNet example from TFDS.
resolution: Integer representing output size.
Returns:
An int32 image of size (resolution, resolution, 3).
"""
image_shape = tf.shape(image)
height, width = image_shape[0], image_shape[1]
side_size = tf.minimum(height, width)
image = tf.image.resize_with_crop_or_pad(
image, target_height=side_size, target_width=side_size)
image = tf.image.resize(image, method="area", antialias=True,
size=(resolution, resolution))
image = tf.cast(tf.round(image), dtype=tf.int32)
return image
def _normalize(x):
"""Coltran normalization to expected range for Inception module.
Args:
x: Image with values in [0,255].
Returns:
Image with values in [-1,1].
"""
x = tf.cast(x, tf.float32)
x = (x / 128.0) - 1.0 # note: 128.0 is the value used in ColTran.
return x
class Evaluator:
"""ColTran FID-5K Evaluator.
  This Evaluator aims to mirror the evaluation pipeline used by Kumar et al.
  in Colorization Transformer (https://arxiv.org/abs/2102.04432).
  To be clear: much of this code consists of direct snippets from the ColTran
  code. See
  github.com/google-research/google-research/blob/master/coltran/datasets.py#L44
  The ColTran pipeline has numerous stages, in which serialized data is passed
  between binaries via files. While we don't physically write the same files,
  we simulate the effects of the serialization (e.g., quantization).
"""
def __init__(self,
predict_fn,
batch_size, # ignored
device_batch_size=5,
coltran_seed=1,
predict_kwargs=None):
"""Create Evaluator.
Args:
predict_fn: Colorization prediction function. Expects grayscale images
of size (512, 512, 3) in keys `image` and `image_ctx` with values in
the range [-1,1]. Outputs `color` image in range [-1,1].
batch_size: ignored.
device_batch_size: number of images per batch, per device.
coltran_seed: used to specify the block of 5_000 images used to generate
the reference pool. Value of `1` matches default ColTran code.
predict_kwargs: arguments passed to `predict_fn`.
"""
del batch_size
self.num_devices = jax.local_device_count()
self.device_batch_size = device_batch_size
logging.log(logging.INFO, "Colorizing with batch size %i on %i devices.",
self.device_batch_size, self.num_devices)
assert 5_000 % (self.device_batch_size * self.num_devices) == 0
predict = functools.partial(predict_fn, **(predict_kwargs or {}))
self.predict_fn = jax.pmap(predict)
module = tfhub.load(tfgan.eval.INCEPTION_TFHUB)
def _pools(x):
return np.squeeze(module(x)[tfgan.eval.INCEPTION_FINAL_POOL].numpy())
self.inception_pool = _pools
# Setup the colorization dataset.
# TRICKY: ColTran FID-5k uses the first 5_000 images returned as read by
# default from tensorflow_datasets (that is: with shard interleaving).
    # In particular, note that this is different from the set of images
    # returned by "validation[:5000]".
def _eval_data_preprocess(example):
# Colorization happens at 512x512 resolution.
image = _preprocess(example["image"], resolution=512)
image = _normalize(image)
grayscale = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return {
"image": image,
"grayscale": grayscale,
"file_name": example["file_name"]
}
ds = tfds.load("imagenet2012", split="validation")
ds = ds.map(_eval_data_preprocess)
ds = ds.take(5_000)
ds = ds.batch(self.device_batch_size)
ds = ds.batch(self.num_devices)
self.eval_data = ds.cache().prefetch(tf.data.AUTOTUNE)
# Setup the reference dataset.
def _reference_data_preprocess(example):
# ColTran eval operates on 256x256.
image = _preprocess(example["image"], resolution=256)
image = _normalize(image)
return {"image": image, "file_name": example["file_name"]}
ds = tfds.load("imagenet2012", split="validation")
ds = ds.map(_reference_data_preprocess)
# Skip the images used in colorization.
ds = ds.skip(5_000)
# ColTran eval w/ seed=1 effectively uses 10_000:15_000 to
# calculate reference.
ds = ds.skip(coltran_seed * 5_000)
ds = ds.take(5_000)
ds = ds.batch(device_batch_size)
self.reference_data = ds.cache().prefetch(tf.data.AUTOTUNE)
def _get_file(name):
return os.path.join(ROOT, name)
with gfile.GFile(_get_file("eval_file_names.txt")) as f:
self.eval_file_names = frozenset(f.read().splitlines())
with gfile.GFile(_get_file("reference_file_names.txt")) as f:
self.reference_file_names = frozenset(f.read().splitlines())
def run(self, params):
"""Run eval."""
if jax.process_index(): # Host0 does all work.
return
color_pools = []
color_file_names = set()
for i, batch in enumerate(self.eval_data.as_numpy_iterator()):
predict_batch = {
"labels": batch["image"],
"image": batch["grayscale"],
"image_ctx": batch["grayscale"],
}
y = self.predict_fn(params, predict_batch)
y = y["color"]
y = einops.rearrange(y, "d b h w c -> (d b) h w c")
# Return to the ColTran eval size of 256x256.
y = tf.image.resize(y, (256, 256), "area")
# Mimic effect of serializing image as integers and map back to [-1, 1].
y = np.clip(np.round((y + 1.) * 128.), 0, 255)
y = _normalize(y)
color_pools.append(self.inception_pool(y))
file_names = einops.rearrange(batch["file_name"], "d b -> (d b)")
color_file_names.update([f.decode() for f in file_names])
logging.log_every_n_seconds(
logging.INFO,
"ColTran FID eval: processed %i colorized examples so far.", 30,
(i + 1) * self.device_batch_size * self.num_devices)
reference_pools = []
reference_file_names = set()
for i, batch in enumerate(self.reference_data.as_numpy_iterator()):
image = batch["image"]
assert np.array_equal(image.shape, (self.device_batch_size, 256, 256, 3))
reference_pools.append(self.inception_pool(image))
reference_file_names.update([f.decode() for f in batch["file_name"]])
logging.log_every_n_seconds(
logging.INFO,
"ColTran FID eval: processed %i reference examples so far.", 30,
(i + 1) * self.device_batch_size)
if color_file_names != self.eval_file_names:
raise ValueError("unknown: {}\nmissing: {}".format(
color_file_names - self.eval_file_names,
self.eval_file_names - color_file_names))
if reference_file_names != self.reference_file_names:
raise ValueError("unknown: {}\nmissing: {}".format(
reference_file_names - self.reference_file_names,
self.reference_file_names - reference_file_names))
color = np.concatenate(color_pools, axis=0)
reference = np.concatenate(reference_pools, axis=0)
if color.shape[0] != 5_000:
raise ValueError(color.shape)
if reference.shape[0] != 5_000:
raise ValueError(reference.shape)
yield "FID_5k", tfgan.eval.frechet_classifier_distance_from_activations(
color, reference)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator to save predictions."""
# pylint: disable=consider-using-from-import
import os
from absl import flags
from absl import logging
import big_vision.evaluators.proj.uvim.common as common
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import jax
import numpy as np
import tensorflow as tf
class Evaluator:
"""Save predictions in "{FLAGS.workdir}/{outfile}".
Results can then be easily inspected in a notebook such as:
```
results = utils.load_checkpoint(None, "<full_path_to_outfile>")
inputs, outputs = (results["inputs"], results["outputs"])
```
"""
def __init__(self, predict_fn, pp_fn, dataset, split, batch_size, outfile,
predict_kwargs=None, dataset_dir=None):
    # Prepare to run predict on all processes and gather predictions on all
    # devices. Note: if needed, consider gathering only across processes.
def predict(params, batch):
y = predict_fn(params, batch['inputs'], **(predict_kwargs or {}))
res = {'inputs': batch['inputs'], 'outputs': y, 'mask': batch['mask']}
return jax.lax.all_gather(res, axis_name='data', axis=0, tiled=True)
self.predict_fn = jax.pmap(predict, axis_name='data')
# Prepare data for each process and pad with zeros so all processes have the
# same number of batches.
def preprocess(example):
return {
'mask': tf.constant(1),
'inputs': pp_builder.get_preprocess_fn(pp_fn)(example),
}
self.data = common.get_jax_process_dataset(
dataset=dataset, split=split,
dataset_dir=dataset_dir,
global_batch_size=batch_size,
pp_fn=preprocess)
self.path = os.path.join(flags.FLAGS.workdir, outfile)
def run(self, params):
"""Compute all predictions, gather in main host and save in outfile."""
count = 0
outputs = []
for batch in self.data.as_numpy_iterator():
out = self.predict_fn(params, batch)
if jax.process_index():
continue
out = jax.device_get(jax.tree_map(lambda x: x[0], out))
out = jax.tree_map(lambda x: x[out['mask'] == 1], out) # pylint: disable=cell-var-from-loop
count += out['mask'].shape[0]
out.pop('mask')
outputs.append(out)
logging.log_every_n_seconds(
logging.INFO, 'Save predictions: processed %i examples so far.', 30,
count)
if jax.process_index():
return
logging.info('Save predictions: processed %d examples.', count)
# Actually save in filesystem.
outputs = jax.tree_map(lambda *x: np.concatenate(x, axis=0), *outputs)
u.save_checkpoint(outputs, self.path, compressed=True)
return
yield None # pylint: disable=unreachable
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities used in evaluators."""
import math
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
def get_jax_process_dataset(dataset, split, global_batch_size, pp_fn,
dataset_dir=None, cache=True, add_tfds_id=False):
"""Returns dataset to be processed by current jax host.
The dataset is sharded and padded with zeros such that all processes
have equal number of batches. The first 2 dimensions of the dataset
elements are: [local_device_count, device_batch_size].
Args:
dataset: dataset name.
split: dataset split.
global_batch_size: batch size to be process per iteration on the dataset.
pp_fn: preprocessing function to apply per example.
dataset_dir: path for tfds to find the prepared data.
cache: whether to cache the dataset after batching.
add_tfds_id: whether to add the unique `tfds_id` string to each example.
"""
assert global_batch_size % jax.device_count() == 0
total_examples = tfds.load(
dataset, split=split, data_dir=dataset_dir).cardinality()
num_batches = math.ceil(total_examples / global_batch_size)
process_split = tfds.even_splits(
split, n=jax.process_count(), drop_remainder=False)[jax.process_index()]
data = tfds.load(
dataset,
split=process_split,
data_dir=dataset_dir,
read_config=tfds.ReadConfig(add_tfds_id=add_tfds_id)).map(pp_fn)
pad_data = tf.data.Dataset.from_tensors(
jax.tree_map(lambda x: tf.zeros(x.shape, x.dtype), data.element_spec)
).repeat()
data = data.concatenate(pad_data)
data = data.batch(global_batch_size // jax.device_count())
data = data.batch(jax.local_device_count())
data = data.take(num_batches)
if cache:
# Eval datasets are often used many times and caching the dataset after
# batching allows one to have the buffers ready to be used and not have
# to wait for preprocessing to be done over and over.
data = data.cache()
return data
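# A usage sketch (the preprocessing fn is a hypothetical placeholder):
#
#   data = get_jax_process_dataset(
#       "imagenet2012", "validation", global_batch_size=1024,
#       pp_fn=lambda ex: {"mask": tf.constant(1), "image": ex["image"]})
#
# Each element is shaped [local_device_count, device_batch_size, ...]; padded
# examples can be detected by a mask field that the zero-padding sets to 0.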
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute PSNR, currently used for colorization and superresolution."""
import functools
import big_vision.evaluators.proj.uvim.common as common
import big_vision.pp.builder as pp_builder
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
class Evaluator:
"""PSNR evaluator.
`predict_fn` accepts arbitrary dictionaries of parameters and data, where
the data dictionary is produced by the `pp_fn` op. It is expected to output a
single-key dict containing an RGB image with intensities in [-1,1].
"""
def __init__(self,
predict_fn,
pp_fn,
batch_size,
dataset="imagenet2012",
split="validation",
predict_kwargs=None):
def predict(params, batch):
def _f(x):
y = predict_fn(params, x, **(predict_kwargs or {}))
# Assume image intensities are in [-1,1].
# Evaluator expects a dict with a single item.
pred, = y.values()
return _psnr(pred, x["labels"], 2.)
return jax.lax.all_gather({
"mask": batch["mask"],
"psnr": _f(batch["input"]),
}, axis_name="data", axis=0)
self.predict_fn = jax.pmap(predict, axis_name="data")
# Prepare data for each process and pad with zeros so all processes have the
# same number of batches.
def preprocess(example):
return {
"mask": tf.constant(1),
"input": pp_builder.get_preprocess_fn(pp_fn)(example),
}
self.data = common.get_jax_process_dataset(
dataset,
split,
global_batch_size=batch_size,
add_tfds_id=True,
pp_fn=preprocess)
def run(self, params):
"""Run eval."""
psnrs = []
for batch in self.data.as_numpy_iterator():
# Outputs is a dict with values shaped (gather/same, devices, batch, ...)
out = self.predict_fn(params, batch)
if jax.process_index(): # Host0 gets all preds and does eval.
continue
# First, we remove the "gather" dim and transfer the result to host,
# leading to numpy arrays of (devices, device_batch, ...)
out = jax.tree_map(lambda x: jax.device_get(x[0]), out)
mask = out["mask"]
batch_psnrs = out["psnr"][mask != 0]
psnrs.extend(batch_psnrs)
if jax.process_index(): # Host0 gets all preds and does eval.
return
yield "PSNR", np.mean(psnrs)
@functools.partial(jax.vmap, in_axes=[0, 0, None])
def _psnr(img0, img1, dynamic_range):
mse = jnp.mean(jnp.power(img0 - img1, 2))
return 20. * jnp.log10(dynamic_range) - 10. * jnp.log10(mse)
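# Sanity-check sketch for `_psnr` (illustrative, not from the original file):
# with dynamic_range=2 (intensities in [-1, 1]) and mse=0.01,
# PSNR = 20*log10(2) - 10*log10(0.01) ~= 6.02 + 20 = 26.02 dB.
# A plain-numpy equivalent for unbatched float arrays:
#
#   def psnr_np(img0, img1, dynamic_range=2.):
#     mse = np.mean((img0 - img1) ** 2)
#     return 20. * np.log10(dynamic_range) - 10. * np.log10(mse)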
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO17 panoptic evaluation."""
import functools
from functools import partial
import json
import os
import tempfile
import time
import zipfile
from absl import logging
from big_vision.evaluators.proj.uvim import common
import big_vision.pp.builder as pp_builder
import jax
import numpy as np
import panopticapi_converters.twochannels2panoptic_coco_format as converter
from panopticapi.evaluation import pq_compute
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.io import gfile
ROOT = os.environ.get('COCO_DATA_DIR', '.')
PANOPTIC_COCO_CATS_FILE = f'{ROOT}/panoptic_coco_categories.json'
PANOPTIC_2017 = {
'train': f'{ROOT}/panoptic_train2017.json',
'validation': f'{ROOT}/panoptic_val2017.json',
}
PANOPTIC_GT_ZIP = {
'train': f'{ROOT}/panoptic_train2017.zip',
'validation': f'{ROOT}/panoptic_val2017.zip',
}
class Evaluator:
"""Panoptic segmentation evaluator: calls official COCO API.
`predict_fn` accepts arbitrary dictionaries of parameters and data, where
the data dictionary is produced by the `pp` op. It is expected to output a
2-channel mask, where the first channel encodes semantics, and the second
channel encodes instance ids.
"""
def __init__(self,
predict_fn,
pp_fn,
batch_size,
dataset='coco/2017_panoptic',
dataset_dir=None,
split='validation',
predict_kwargs=None):
# Prepare to run predict on all processes and gather predictions on all
# devices. Note: if needed consider only gather across processes.
def predict(params, batch):
res = {
'image/id': batch['image/id'],
'mask': batch['mask'],
'y': predict_fn(params, batch['input'], **(predict_kwargs or {})),
}
return jax.lax.all_gather(res, axis_name='data', axis=0)
self.predict_fn = jax.pmap(predict, axis_name='data')
# Prepare data for each process and pad with zeros so all processes have the
# same number of batches.
def preprocess(example):
return {
'image/id': example['image/id'],
'mask': tf.constant(1),
'input': pp_builder.get_preprocess_fn(pp_fn)(example),
}
self.data = common.get_jax_process_dataset(
dataset, split, dataset_dir=dataset_dir,
global_batch_size=batch_size,
pp_fn=preprocess)
# Only process 0 runs conversion to png and calls into coco api.
if jax.process_index() == 0:
self.result_dir = tempfile.TemporaryDirectory()
(self.gt_folder, self.gt_json, self.categories_json,
self.remap, self.size_map) = _prepare_ground_truth(
dataset, split, dataset_dir)
def _compute_png_predictions(self, params):
"""Computes predictions and converts then to png to optimize memory use."""
count = 0
logging.info('Panoptic eval: running inference.')
for batch in self.data.as_numpy_iterator():
out = self.predict_fn(params, batch)
if jax.process_index():
continue
out = jax.device_get(jax.tree_map(lambda x: x[0], out))
mask = out['mask']
pan_recs = out['y'][mask != 0]
ids = out['image/id'][mask != 0]
for pan_rec, image_id in zip(pan_recs, ids):
sem = pan_rec[..., 0]
ins = pan_rec[..., 1]
sem_remapped = np.array(sem)
for v in np.unique(sem):
sem_remapped[sem == v] = self.remap[v]
sem = sem_remapped
pan_mask = np.stack([sem, ins, np.zeros_like(sem)], axis=-1)
pan_mask = _resize_nearest(pan_mask, self.size_map[image_id])
pan_mask_png = tf.io.encode_png(pan_mask.astype('uint8')).numpy()
fname = f'{self.result_dir.name}/{image_id:012d}.png'
with open(fname, 'wb') as f:
f.write(pan_mask_png)
count += 1
logging.log_every_n_seconds(
logging.INFO, 'Panoptic eval: processed %i examples so far.', 30,
count)
if jax.process_index():
return None
logging.info('Panoptic eval: inference done. Processed %d examples.', count)
return self.result_dir
def run(self, params):
"""Run eval."""
# Note result_dir is constant, but files inside are mutated.
result_dir = self._compute_png_predictions(params)
if not result_dir:
return
with tempfile.TemporaryDirectory() as pred_folder, \
tempfile.NamedTemporaryFile(mode='w') as pred_json:
logging.info('Panoptic eval: running conversion.')
converter.converter(
source_folder=result_dir.name,
images_json_file=self.gt_json,
categories_json_file=self.categories_json,
segmentations_folder=pred_folder,
predictions_json_file=pred_json.name)
logging.info('Panoptic eval: conversion done.')
logging.info('Panoptic eval: running metrics computation.')
res = pq_compute(gt_json_file=self.gt_json,
gt_folder=self.gt_folder,
pred_json_file=pred_json.name,
pred_folder=pred_folder)
logging.info('Panoptic eval: metrics computation done.')
for k in ['All', 'Stuff', 'Things']:
for m in ['pq', 'rq', 'sq']:
yield f'{k}_{m}', res[k][m]
def _prepare_ground_truth(dataset, split, data_dir):
"""Prepare ground truth from tf.data.Dataset."""
if dataset == 'coco/2017_panoptic' and data_dir is None:
return _prepare_ground_truth_from_zipfiles(split)
else:
return _prepare_ground_truth_from_dataset(dataset, split, data_dir)
@functools.lru_cache(maxsize=None)
def _prepare_ground_truth_from_dataset(dataset, split, data_dir):
"""Prepare ground truth from a tf.data.Dataset."""
dataset = tfds.builder(dataset, data_dir=data_dir).as_dataset(split=split)
categories_json = _make_local_copy(PANOPTIC_COCO_CATS_FILE)
with gfile.GFile(categories_json, 'rb') as f:
categories = json.loads(f.read())
# Build map from tfds class ids to COCO class ids.
remap = {0: 0}
with gfile.GFile(categories_json, 'r') as f:
remap = {**remap, **{(i + 1): x['id'] for i, x in enumerate(categories)}}
gt_folder = tempfile.mkdtemp()
gfile.makedirs(gt_folder)
size_map = {}
annotations = []
images = []
for example in dataset:
image_id = int(example['image/id'])
panoptic_image = example['panoptic_image']
ann_ids = example['panoptic_objects']['id']
ann_labels = example['panoptic_objects']['label']
ann_iscrowd = example['panoptic_objects']['is_crowd']
ann_area = example['panoptic_objects']['area']
fname = f'{image_id:012d}.png'
with gfile.GFile(os.path.join(gt_folder, fname), 'wb') as f:
f.write(tf.io.encode_png(panoptic_image).numpy())
size_map[image_id] = (panoptic_image.shape[0], panoptic_image.shape[1])
segments_info = []
for i in range(len(ann_ids)):
segments_info.append({
'id': int(ann_ids[i]),
'category_id': remap[int(ann_labels[i] + 1)],
'iscrowd': int(ann_iscrowd[i]),
'area': int(ann_area[i]),
})
annotations.append({
'file_name': str(fname),
'image_id': int(image_id),
'segments_info': segments_info
})
images.append({
'id': image_id,
'file_name': f'{image_id:012d}.jpg',
})
# Write annotations.json needed for pq_compute.
gt_json = os.path.join(gt_folder, 'annotations.json')
with gfile.GFile(gt_json, 'wb') as f:
f.write(json.dumps({
'images': images,
'annotations': annotations,
'categories': categories,
}))
return gt_folder, gt_json, categories_json, remap, size_map
def _prepare_ground_truth_from_zipfiles(split):
"""Prepare ground truth from coco zip files."""
split_prefix = split.split('[')[0]
if split_prefix not in ('train', 'validation'):
raise ValueError(f'Split {split} not supported')
  # The following 4 calls are cached. This saves significant time
# in use cases like sweeping predict_fn hparams on the same run.
gt_json = _make_local_copy(PANOPTIC_2017[split_prefix])
gt_folder = _make_local_unzip_copy(PANOPTIC_GT_ZIP[split_prefix])
categories_json = _make_local_copy(PANOPTIC_COCO_CATS_FILE)
image_ids = _list_image_ids('coco/2017_panoptic', split)
gt_folder = os.path.join(
gt_folder, 'panoptic_val2017'
if split_prefix == 'validation' else 'panoptic_train2017')
# Build map from tfds class ids to COCO class ids.
remap = {0: 0}
with gfile.GFile(categories_json, 'r') as f:
remap = {**remap, **{(i + 1): x['id'] for i, x in enumerate(json.load(f))}}
# Filters gt_json to contain only annotations for images in dataset.
with gfile.GFile(gt_json) as f:
data = json.load(f)
logging.info(
'Panoptic eval: pre-filter %d annotations.',
len(data['annotations'])
)
data['images'] = [x for x in data['images'] if x['id'] in image_ids]
data['annotations'] = [
x for x in data['annotations'] if x['image_id'] in image_ids
]
logging.info(
'Panoptic eval: post-filter %d annotations.',
len(data['annotations'])
)
filtered_gt_json = tempfile.NamedTemporaryFile(delete=False).name
with open(filtered_gt_json, 'w') as f:
json.dump(data, f)
# Precompute images sizes.
size_map = {x['id']: (x['height'], x['width']) for x in data['images']}
return gt_folder, filtered_gt_json, categories_json, remap, size_map
@functools.lru_cache(maxsize=None)
def _list_image_ids(dataset, split):
d = tfds.load(dataset, split=split).map(lambda x: x['image/id'])
return frozenset(d.as_numpy_iterator())
@functools.lru_cache(maxsize=None)
def _make_local_copy(fname) -> str:
start = time.monotonic()
local_file = tempfile.NamedTemporaryFile(delete=False)
gfile.copy(fname, local_file.name, overwrite=True)
logging.info('Copy %s in %d seconds.', fname, time.monotonic() - start)
return local_file.name
@functools.lru_cache(maxsize=None)
def _make_local_unzip_copy(fname) -> str:
start = time.monotonic()
folder = tempfile.mkdtemp()
with tempfile.NamedTemporaryFile() as tmp_zip_file:
gfile.copy(fname, tmp_zip_file.name, overwrite=True)
with zipfile.ZipFile(tmp_zip_file.name, 'r') as f:
f.extractall(folder)
logging.info('Copy %s in %d seconds.', fname, time.monotonic() - start)
return folder
@partial(jax.jit, static_argnums=(1,), backend='cpu')
def _resize_nearest(image, shape):
return jax.image.resize(image, shape + image.shape[-1:], 'nearest')
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation for NYU depth.
At evaluation time the ground truth is cropped and clipped. Values outside of
the test crop or clipping range are not included in eval calculations.
In this evaluator, it is assumed that the ground truth is already cropped, so
the entire image is evaluated. However, the evaluator does perform the clipping.
Reference implementations:
https://github.com/zhyever/Monocular-Depth-Estimation-Toolbox/blo(internal link)a0f341244260ff61541191a613dd74bc/depth/datasets/nyu.py
https://github.com/vinvino02/GLPDepth/blob/7f3c78df4ecd6e7c79fd0c4b73c95d61f4aa2121/code/utils/metrics.py
https://github.com/shariqfarooq123/AdaBins/blob/2fb686a66a304f0a719bc53d77412460af97fd61/evaluate.py
"""
import functools
import big_vision.evaluators.proj.uvim.common as common
import big_vision.pp.builder as pp_builder
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
EVAL_CROP_H = 426
EVAL_CROP_W = 560
class Evaluator:
"""Evaluator for NYU depth."""
def __init__(self,
predict_fn,
pp_fn,
batch_size,
dataset,
split,
min_depth=1e-3,
max_depth=10,
dataset_dir=None,
predict_kwargs=None):
self.min_depth = min_depth
self.max_depth = max_depth
def predict(params, batch):
pred = predict_fn(params, batch, **(predict_kwargs or {}))
return jax.lax.all_gather({
"mask": batch["mask"],
"gt": jnp.squeeze(batch["ground_truth"], axis=-1),
"y": pred["depth"],
}, axis_name="data", axis=0)
self.predict_fn = jax.pmap(predict, axis_name="data")
# Prepare data for each process and pad with zeros so all processes have the
# same number of batches.
def preprocess(example):
return {
"mask": tf.constant(1),
**pp_builder.get_preprocess_fn(pp_fn)(example),
}
self.process_batch_size = batch_size // jax.process_count()
self.data = common.get_jax_process_dataset(
dataset=dataset,
dataset_dir=dataset_dir,
split=split,
global_batch_size=batch_size,
pp_fn=preprocess)
def run(self, params):
"""Run eval."""
# Assumes that the ground truth is processed by the eval crop.
eval_mask = np.ones((EVAL_CROP_H, EVAL_CROP_W), dtype=np.bool_)
rmses = []
abs_res = []
abs_logs = []
d1s = []
d2s = []
d3s = []
for batch in self.data.as_numpy_iterator():
# Outputs is a dict with values shaped (gather/same, devices, batch, ...)
out = self.predict_fn(params, batch)
if jax.process_index(): # Host0 gets all preds and does eval.
continue
# First, we remove the "gather" dim and transfer the result to host,
# leading to numpy arrays of (devices, device_batch, ...)
out = jax.tree_map(lambda x: jax.device_get(x[0]), out)
# Then the bool-indexing with mask resulting in flat (global_batch, ...)
out = jax.tree_map(lambda x: x[out["mask"] == 1], out) # pylint:disable=cell-var-from-loop
for gt, pred in zip(out["gt"], out["y"]):
pred = _resize_nearest(pred, (EVAL_CROP_H, EVAL_CROP_W))
valid_mask = np.logical_and(gt > self.min_depth, gt < self.max_depth)
valid_mask = np.logical_and(valid_mask, eval_mask)
rmses.append(_compute_rmse(gt[valid_mask], pred[valid_mask]))
abs_res.append(_compute_abs_re(gt[valid_mask], pred[valid_mask]))
abs_logs.append(_compute_abs_log(gt[valid_mask], pred[valid_mask]))
d1s.append(_compute_delta(gt[valid_mask], pred[valid_mask], order=1))
d2s.append(_compute_delta(gt[valid_mask], pred[valid_mask], order=2))
d3s.append(_compute_delta(gt[valid_mask], pred[valid_mask], order=3))
if jax.process_index(): # Host0 gets all preds and does eval.
return
yield "RMSE", np.mean(rmses)
yield "abs_RE", np.mean(abs_res)
yield "log10", np.mean(abs_logs)
yield "delta1", np.mean(d1s)
yield "delta2", np.mean(d2s)
yield "delta3", np.mean(d3s)
@functools.partial(jax.jit, static_argnums=(1,), backend="cpu")
def _resize_nearest(image, shape):
return jax.image.resize(image, shape, "nearest")
def _compute_rmse(gt, pred):
diff = gt - pred
return np.sqrt(np.mean(np.power(diff, 2)))
def _compute_abs_re(gt, pred):
diff = np.abs(gt - pred)
return np.mean(diff / gt)
def _compute_abs_log(gt, pred):
diff = np.abs(np.log10(gt) - np.log10(pred))
return np.mean(diff)
def _compute_delta(gt, pred, order):
rel_diff = np.maximum(gt / pred, pred / gt)
return np.sum(rel_diff < 1.25**order) / rel_diff.size
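# Worked example for the threshold metric above (illustrative, not from the
# original file): a pixel with gt=2.0m and pred=2.4m has relative ratio
# max(2.0/2.4, 2.4/2.0) = 1.2 < 1.25, so it counts towards delta1 (and hence
# delta2/delta3, whose thresholds are 1.25**2 and 1.25**3).
#
#   gt, pred = np.array([2.0, 4.0]), np.array([2.4, 6.0])
#   _compute_delta(gt, pred, order=1)  # -> 0.5, since max(6/4, 4/6) = 1.5 > 1.25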
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator for computing mean of per-example metrics."""
import functools
from typing import Mapping
from big_vision import input_pipeline
from big_vision.datasets import core as ds_core
from big_vision.pp import builder as pp_builder
import jax
import jax.numpy as jnp
import numpy as np
# Note: global to avoid jax re-compiling across different evaluator instances.
@functools.partial(jax.pmap, static_broadcasted_argnums=0, axis_name='batch')
def _run_predict_fn(predict_fn, params, batch):
"""Sum per-example metrics weighted by `_mask`."""
mask = batch['_mask']
metrics = predict_fn(params, batch)
# Sanity check output format of predict_fn.
assert isinstance(metrics, Mapping), 'predict_fn must return a dict'
for y in jax.tree_leaves(metrics):
if y.shape != mask.shape:
raise ValueError(
f'Expected per-example metrics of shape {mask.shape} found '
f'{jax.tree_map(lambda x: x.shape, metrics)}.')
metrics = {**metrics, '_mask': mask}
metrics = jax.tree_map(lambda x: jnp.inner(x, mask), metrics)
return jax.lax.psum(metrics, axis_name='batch')
class Evaluator:
"""Report the mean of per-example metrics computed by predict_fn.
`predict_fn(params, batch)` must return a dict from metric name to
per-example metrics of shape [batch_size].
"""
def __init__(self, predict_fn, data, pp_fn, batch_size,
cache_final=True, cache_raw=False, prefetch=1):
data = ds_core.get(**data)
self.dataset, self.steps = input_pipeline.make_for_inference(
data.get_tfdata(ordered=True), batch_size=batch_size,
num_ex_per_process=data.num_examples_per_process(),
preprocess_fn=pp_builder.get_preprocess_fn(pp_fn),
cache_final=cache_final, cache_raw=cache_raw)
self.data_iter = input_pipeline.start_input_pipeline(self.dataset, prefetch)
self.predict_fn = predict_fn
def run(self, params):
"""Computes all metrics."""
metrics = []
# Compute batch metrics without blocking.
for _, batch in zip(range(self.steps), self.data_iter):
batch_metrics = _run_predict_fn(self.predict_fn, params, batch)
metrics.append(batch_metrics)
# Transfer metrics from device 0 to host (blocking).
metrics = jax.device_get(jax.tree_map(lambda x: x[0], metrics))
metrics_sum = jax.tree_map(lambda *x: np.sum(x), *metrics)
mask_sum = metrics_sum.pop('_mask')
for key, value_sum in metrics_sum.items():
yield (key, value_sum / mask_sum)
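# Minimal sketch of a compatible `predict_fn` (assumed names; `model` is
# hypothetical and not part of this module). It must map (params, batch) to a
# dict of per-example metrics shaped [batch_size], e.g. top-1 correctness:
#
#   def predict_fn(params, batch):
#     logits = model.apply(params, batch['image'])
#     top1 = (jnp.argmax(logits, -1) == batch['label']).astype(jnp.float32)
#     return {'top1_acc': top1}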
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discriminative zero-shot classification evaluator.
"""
import functools
import time
from absl import logging
from big_vision.evaluators.proj.image_text import prompt_engineering
from big_vision.pp import ops_general # pylint: disable=unused-import
from big_vision.pp import ops_image # pylint: disable=unused-import
import big_vision.pp.builder as pp_builder
import flax
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
DATASET_NAMES = ("imagenet2012", "cifar100", "oxford_iiit_pet")
DEFAULT_OVERRIDES = (
("imagenet2012", (
("class_names", "clip"),
("split", "validation"),
)),
)
def _with_infinite_padding(dataset):
"""Adds "infinite padding" to the dataset."""
filler_element = tf.nest.map_structure(
lambda spec: tf.zeros(spec.shape, spec.dtype)[None], dataset.element_spec)
filler_element["mask"] = [False]
filler_dataset = tf.data.Dataset.from_tensor_slices(filler_element)
dataset = dataset.map(
lambda features: dict(mask=True, **features),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.concatenate(filler_dataset.repeat(None))
def _pad_and_batch(dataset, batch_dims):
"""Adds padding and then batches dataset."""
dataset = _with_infinite_padding(dataset)
for batch_size in reversed(batch_dims):
dataset = dataset.batch(batch_size)
return dataset
# This is needed so retrieval_test can replace dataset info.
def _get_dataset_info(builder):
return builder.info
def prepare_datasets(img_dataset,
class_names,
*,
prompt_templates,
pp_img,
pp_txt,
cache_final=False,
filter_fn=None,
class_name_offset=0):
"""Returns unbatched `ds_images, ds_texts` datasets."""
assert prompt_templates, "Must specify prompt templates (e.g. simply ['{}'])"
def expand_aliases(idx, class_name):
class_names = tf.strings.split(class_name, ",")
return tf.data.Dataset.from_tensor_slices((
tf.repeat([idx + class_name_offset], len(class_names), axis=0),
class_names,
))
def add_prompts(idx, class_name):
return tf.data.Dataset.from_tensor_slices({
"label": tf.repeat([idx], len(prompt_templates), axis=0),
"class_name": tf.repeat([class_name], len(prompt_templates), axis=0),
"prompt_template": prompt_templates,
})
def substitute_prompt(features):
parts = tf.strings.split(features["prompt_template"], "{}")
tf.debugging.assert_equal(len(parts), 2, features["prompt_template"])
return {
"label": features["label"],
"texts": tf.strings.join([parts[0], features["class_name"], parts[1]])
}
if filter_fn:
img_dataset = img_dataset.filter(filter_fn)
ds_images = img_dataset.map(
pp_builder.get_preprocess_fn(f"{pp_img}|keep('label', 'image')"))
ds_texts = tf.data.Dataset.from_tensor_slices(list(class_names)).enumerate(
).flat_map(expand_aliases).flat_map(add_prompts).map(substitute_prompt).map(
pp_builder.get_preprocess_fn(f"{pp_txt}|keep('label', 'labels')"))
if cache_final:
ds_images, ds_texts = ds_images.cache(), ds_texts.cache()
return ds_images, ds_texts
def _split_and_batch(dataset_name, data_dir, class_names, batch_size, split,
get_ds):
"""Splits dataset, calls `get_ds` and returns padded + batched datasets."""
assert not batch_size % jax.device_count(), (
f"batch_size={batch_size} % jax.device_count()={jax.device_count()}")
builder = tfds.builder(dataset_name, data_dir=data_dir)
batch_dims = [
jax.local_device_count(), batch_size // jax.device_count()
]
# Split class names (last process gets remainder).
if len(class_names) < jax.process_count():
# See cl/442751961 for more details.
class_names += [""] * (jax.process_count() - len(class_names))
per_process = len(class_names) // jax.process_count()
class_name_offset = per_process * jax.process_index()
if jax.process_index() == jax.process_count() - 1:
class_names = class_names[class_name_offset:]
else:
class_names = class_names[class_name_offset:class_name_offset + per_process]
ds_images, ds_texts = get_ds(
builder.as_dataset(split=tfds.split_for_jax_process(split)),
class_names,
class_name_offset=class_name_offset)
return (
_pad_and_batch(ds_images, batch_dims),
_pad_and_batch(ds_texts, batch_dims),
)
def _average_embeddings(embeddings, *, labels, num_classes, normalize):
"""Computes per-class averages of `embeddings`."""
assert embeddings.ndim == 2, f"Expected {embeddings.ndim}==2"
assert labels.ndim == 1, f"Expected {labels.ndim}==1"
assert len(labels) == len(embeddings), (
f"Expected {len(labels)}=={len(embeddings)}")
byidx = [[] for _ in range(num_classes)]
for label, embedding in zip(labels, embeddings):
byidx[label].append(embedding)
missing = set(range(num_classes)) - set(
idx for idx, embs in enumerate(byidx) if len(embs))
assert not missing, f"Classes without embeddings: {missing}"
embeddings = [np.array(embedding).mean(axis=0) for embedding in byidx]
embeddings = np.stack(embeddings)
assert len(embeddings) == num_classes
if normalize:
embeddings /= 1e-8 + np.linalg.norm(embeddings, axis=1, keepdims=True)
return embeddings
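# Worked example (illustrative, not from the original file): two classes,
# three embeddings.
#
#   embs = np.array([[1., 0.], [0., 1.], [0., 2.]])
#   _average_embeddings(embs, labels=np.array([0, 1, 1]),
#                       num_classes=2, normalize=True)
#   # class 0 -> [1, 0]; class 1 -> mean of [0, 1] and [0, 2] = [0, 1.5],
#   # which L2-normalizes to ~[0, 1].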
class Evaluator:
"""Zero-shot classification evaluator."""
def __init__(self,
predict_fn,
*,
batch_size,
dataset_names=DATASET_NAMES,
data_dir=None,
class_names="dataset_info:label",
split="test",
prompt_templates="clip_paper",
canonicalize=True,
pp_img="resize(224)|value_range(-1,1)",
pp_txt="tokenize(max_len=16, eos='sticky', "
"pad_value=1, inkey='texts', outkey='labels')",
cache_final=False,
filter_fn=None,
first_class_name_only=True,
batched_features_transform=lambda x: x,
dataset_overrides=DEFAULT_OVERRIDES,
async_delay=1):
"""Initializes a new zero-shot classification evaluator.
See `prepare_datasets()` for details on how the dataset is pre-processed.
Args:
predict_fn: Prediction function with signature
`zimg, ztxt, out = predict_fn(params, images, texts)`
batch_size: Global batch size.
dataset_names: Names of TFDS datasets to evaluate on.
data_dir: Optional argument to `tfds.builder()`.
class_names: Usually specified as a string that is interpreted by
`prompt_engineering.get_class_names()` to look up class names.
Alternatively, this attribute can be a list of class names (using ","
to separate multiple aliases).
split: Which dataset split to use for evaluation.
prompt_templates: Specifies which prompt templates to use. See module
        big_vision.evaluators.proj.image_text.prompt_engineering
for valid values.
canonicalize: Whether class names and prompt templates should be
canonicalized. See `prompt_engineering.py` for details.
pp_img: Preprocessing string for images. Preprocessed features should
contain key "image" with value that can be batched and is suitable for
        the `images` argument of `predict_fn`.
pp_txt: Preprocessing string for texts. Can expect "texts" key as an input
(shape=[], dtype=string), and is expected to produce "labels" key that
        is suitable for the `texts` argument of `predict_fn`.
      cache_final: Whether the preprocessed dataset should be cached.
filter_fn: Predicate to be applied to the dataset for filtering records.
first_class_name_only: Whether only the first class name should be
considered (i.e. not using any aliases).
batched_features_transform: Function that is applied to the batched
features before embedding the texts/images.
dataset_overrides: Mapping `dataset_name` to an optional dictionary that
can override parameters `dataset_name`, `data_dir`, `pp_img`, `pp_txt`,
`class_names`, `split`, `filter_fn`, and the extra
`class_names_dataset_name`.
Works with tuple/dict of tuples/dicts.
async_delay: How many steps to wait before checking if all hosts have
finished their batch. A value > 1 allows for more parallelized
        processing, but will result in more unnecessary steps with padded data.
"""
t0 = time.monotonic()
self.datasets = {}
self.prompt_templates = prompt_engineering.get_prompt_templates(
prompt_templates, canonicalize=canonicalize)
self._axis_name = "batch"
dataset_overrides = {k: dict(v) for k, v in dict(dataset_overrides).items()}
for dataset_name in dataset_names:
overrides = dataset_overrides.pop(dataset_name, {})
dataset_name_ = overrides.pop("dataset_name", dataset_name)
data_dir_ = overrides.pop("data_dir", data_dir)
class_names_dataset_name = overrides.pop("class_names_dataset_name",
dataset_name_)
class_names_ = overrides.pop("class_names", class_names)
class_names_ = prompt_engineering.get_class_names(
dataset_name=class_names_dataset_name,
source=class_names_,
canonicalize=canonicalize)
pp_img_ = overrides.pop("pp_img", pp_img)
pp_txt_ = overrides.pop("pp_txt", pp_txt)
cache_final_ = overrides.pop("cache_final", cache_final)
split_ = overrides.pop("split", split)
filter_fn_ = overrides.pop("filter_fn", filter_fn)
assert not overrides, f"Unknown overrides {dataset_name}: {overrides}"
if first_class_name_only:
class_names_ = [name.split(",")[0] for name in class_names_]
ds_images, ds_texts = _split_and_batch(
dataset_name=dataset_name_,
data_dir=data_dir_,
class_names=class_names_,
batch_size=batch_size,
split=split_,
get_ds=functools.partial(
prepare_datasets,
pp_img=pp_img_,
pp_txt=pp_txt_,
cache_final=cache_final_,
filter_fn=filter_fn_,
prompt_templates=self.prompt_templates))
self.datasets[dataset_name] = dict(
images=ds_images, texts=ds_texts, class_names=class_names_,
dataset_name=dataset_name_, split=split_)
assert not dataset_overrides, f"Extra overrides: {dataset_overrides}"
def embed_texts(params, texts):
"""Returns text embeddings."""
_, ztxt, _ = predict_fn(params, None, texts)
return jnp.concatenate(
jax.lax.all_gather(ztxt, axis_name=self._axis_name), axis=0)
def count_correct(params, return_embeddings, *, mask, labels, image, ztxt):
"""Returns count of correct predictions (and optionally embeddings)."""
zimg, _, _ = predict_fn(params, image, None)
best_txt = (zimg @ ztxt.T).argmax(axis=1)
# labels has format [[1, -1, -1], [5, -1, -1], [7, 2, -1], ...]
# so here we count "any" correct, such that the counting matches the
# multilabel scenario described in "are we done with imagenet"
# (http://arxiv.org/abs/2006.07159) section 3.1
assert labels.ndim == 2, labels.shape
matching = (best_txt[:, None] == labels).sum(axis=1)
correct = jnp.where(mask, (matching > 0).astype(jnp.int32), 0).sum()
correct = jax.lax.psum(correct, axis_name=self._axis_name)
if return_embeddings:
zimg = jnp.concatenate(
jax.lax.all_gather(zimg, axis_name=self._axis_name), axis=0)
else:
zimg = None
return correct, zimg
def gather_concatenate(x):
"""Gathers data from all hosts (for use with `embed_texts()`)."""
return jnp.concatenate(
jax.lax.all_gather(x, axis_name=self._axis_name), axis=0)
self._embed_texts_p = jax.pmap(
embed_texts, axis_name=self._axis_name)
self._count_correct_p = jax.pmap(
count_correct, axis_name=self._axis_name, static_broadcasted_argnums=1)
self._count_p = jax.pmap(
lambda mask: jax.lax.psum(mask.sum(), axis_name=self._axis_name),
axis_name=self._axis_name)
self._gather_concatenate_p = jax.pmap(
gather_concatenate, axis_name=self._axis_name)
self._compiled = set()
assert async_delay > 0, f"async_delay must be >0, not {async_delay}"
self._async_delay = async_delay
self._batched_features_transform = batched_features_transform
logging.info("Initialized evaluator in %.1f seconds", time.monotonic() - t0)
def _embed_texts(self, params, dataset_name):
"""Returns per-class averaged text embeddings."""
t0 = time.monotonic()
logging.info("Starting text embedding...")
ns = []
embeddings = []
data = {"label": [], "mask": []}
for batch in self.datasets[dataset_name]["texts"]:
batch = self._batched_features_transform(batch)
batch = jax.tree_map(lambda x: np.asarray(memoryview(x)), batch)
ns.append(self._count_p(batch["mask"])[0])
if len(ns) >= self._async_delay and ns[-self._async_delay] == 0:
break
embeddings.append(self._embed_texts_p(params, batch["labels"])[0])
for name in data:
data[name].append(self._gather_concatenate_p(batch[name])[0])
if self._embed_texts_p not in self._compiled:
logging.info("Compiled text embeddings in %.1fs", time.monotonic() - t0)
t0 = time.monotonic()
self._compiled.add(self._embed_texts_p)
ns = np.array(ns)
n = ns.sum()
data["embedding"] = embeddings
data = {k: np.concatenate(v, axis=0) for k, v in data.items()}
mask = data.pop("mask").astype(bool)
data = {k: v[mask] for k, v in data.items()}
data["average_embedding"] = _average_embeddings(
data["embedding"],
labels=data["label"],
num_classes=len(self.datasets[dataset_name]["class_names"]),
normalize=True)
logging.info("Embedded %s text in %d steps - ...%s", dataset_name, len(ns),
ns[-10:])
logging.info("Totalling %d text in %.1fs", n, time.monotonic() - t0)
logging.info("Total texts embeddings size %.1fM",
data["embedding"].nbytes / 1e6)
return data
def evaluate(self,
params,
dataset_name,
*,
return_embeddings=False):
"""Returns evaluation results."""
texts = self._embed_texts(params, dataset_name)
ztxt_p = flax.jax_utils.replicate(texts["average_embedding"])
t0 = time.monotonic()
logging.info("Starting image embedding...")
ns = []
embeddings = []
corrects = []
data = {"mask": [], "label": []} if return_embeddings else {}
for batch in self.datasets[dataset_name]["images"]:
batch = self._batched_features_transform(batch)
batch = jax.tree_map(lambda x: np.asarray(memoryview(x)), batch)
# Due to infinite padding, this loop will never end. We will stop once
# all processes only process padded data. Checking ns[-k] instead of
# ns[-1] allows us to tune additional steps vs. interleaved processing.
ns.append(self._count_p(batch["mask"])[0])
if len(ns) >= self._async_delay and ns[-self._async_delay] == 0:
break
labels = batch["label"]
if labels.ndim == 2:
labels = labels[..., None]
assert labels.ndim == 3
correct_p, embs_p = self._count_correct_p(
params,
return_embeddings,
mask=batch["mask"],
labels=labels,
image=batch["image"],
ztxt=ztxt_p,
)
corrects.append(correct_p[0])
if self._count_correct_p not in self._compiled:
logging.info("Compiled image embeddings in %.1fs",
time.monotonic() - t0)
t0 = time.monotonic()
self._compiled.add(self._count_correct_p)
if return_embeddings:
embeddings.append(embs_p[0])
for name in data:
data[name].append(self._gather_concatenate_p(batch[name])[0])
ns = np.array(ns)
n = ns.sum()
correct = np.array(corrects).sum()
logging.info("Embedded %s image in %d steps - ...%s", dataset_name, len(ns),
ns[-10:])
logging.info("Totalling %d image in %.1fs", n, time.monotonic() - t0)
ret = {
"accuracy": correct / n,
"correct": correct,
"count": n,
}
logging.info("Dataset %s, results %s", dataset_name, ret)
if return_embeddings:
data["embedding"] = embeddings
data = {k: np.concatenate(v, axis=0) for k, v in data.items()}
logging.info("Total images embeddings size %.1fM",
data["embedding"].nbytes / 1e6)
mask = data.pop("mask").astype(bool)
ret["images"] = {k: v[mask] for k, v in data.items()}
ret["texts"] = texts
return ret
def run(self, params):
"""Returns metrics."""
return [(f"{dataset_name}_accuracy",
self.evaluate(params, dataset_name)["accuracy"])
for dataset_name in self.datasets]
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used by the module `prompt_engineering` in the same directory."""
CLIP_PAPER_PROMPT_TEMPLATES = [
'a bad photo of a {}.',
'a photo of many {}.',
'a sculpture of a {}.',
'a photo of the hard to see {}.',
'a low resolution photo of the {}.',
'a rendering of a {}.',
'graffiti of a {}.',
'a bad photo of the {}.',
'a cropped photo of the {}.',
'a tattoo of a {}.',
'the embroidered {}.',
'a photo of a hard to see {}.',
'a bright photo of a {}.',
'a photo of a clean {}.',
'a photo of a dirty {}.',
'a dark photo of the {}.',
'a drawing of a {}.',
'a photo of my {}.',
'the plastic {}.',
'a photo of the cool {}.',
'a close-up photo of a {}.',
'a black and white photo of the {}.',
'a painting of the {}.',
'a painting of a {}.',
'a pixelated photo of the {}.',
'a sculpture of the {}.',
'a bright photo of the {}.',
'a cropped photo of a {}.',
'a plastic {}.',
'a photo of the dirty {}.',
'a jpeg corrupted photo of a {}.',
'a blurry photo of the {}.',
'a photo of the {}.',
'a good photo of the {}.',
'a rendering of the {}.',
'a {} in a video game.',
'a photo of one {}.',
'a doodle of a {}.',
'a close-up photo of the {}.',
'a photo of a {}.',
'the origami {}.',
'the {} in a video game.',
'a sketch of a {}.',
'a doodle of the {}.',
'a origami {}.',
'a low resolution photo of a {}.',
'the toy {}.',
'a rendition of the {}.',
'a photo of the clean {}.',
'a photo of a large {}.',
'a rendition of a {}.',
'a photo of a nice {}.',
'a photo of a weird {}.',
'a blurry photo of a {}.',
'a cartoon {}.',
'art of a {}.',
'a sketch of the {}.',
'a embroidered {}.',
'a pixelated photo of a {}.',
'itap of the {}.',
'a jpeg corrupted photo of the {}.',
'a good photo of a {}.',
'a plushie {}.',
'a photo of the nice {}.',
'a photo of the small {}.',
'a photo of the weird {}.',
'the cartoon {}.',
'art of the {}.',
'a drawing of the {}.',
'a photo of the large {}.',
'a black and white photo of a {}.',
'the plushie {}.',
'a dark photo of a {}.',
'itap of a {}.',
'graffiti of the {}.',
'a toy {}.',
'itap of my {}.',
'a photo of a cool {}.',
'a photo of a small {}.',
'a tattoo of the {}.',
'{}',
]
CLIP_BEST_PROMPT_TEMPLATES = [
'itap of a {}.',
'a bad photo of the {}.',
'a origami {}.',
'a photo of the large {}.',
'a {} in a video game.',
'art of the {}.',
'a photo of the small {}.',
'{}',
]
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for image_text_retrieval."""
from typing import Mapping
from absl.testing import absltest
from absl.testing import parameterized
from big_vision.evaluators.proj.image_text import image_text_retrieval
import numpy as np
class ImTextRetrievalTest(parameterized.TestCase):
@parameterized.parameters(
(np.array([[0.0, 0.0, 0.1, 0.5, 0.1, 0.2, 0.5, 0.1],
[0.5, 0.4, 0.0, 0.0, 0.4, 0.2, 0.6, 0.4],
[0.5, 0.4, 0.1, 0.5, 0.0, 0.0, 0.8, 0.3],
[0.5, 0.4, 0.1, 0.5, 0.3, 0.2, 0.0, 0.0]]), {
'Recall@1': 1.0,
'Recall@5': 1.0,
'Recall@10': 1.0
}), #
(np.array([[0.8, 0.8, 0.1, 0.5, 0.1, 0.2, 0.5, 0.1],
[0.5, 0.4, 0.0, 0.0, 0.4, 0.2, 0.6, 0.4],
[0.5, 0.4, 0.1, 0.5, 0.0, 0.8, 0.8, 0.3],
[0.5, 0.4, 0.1, 0.5, 0.4, 0.2, 0.3, 0.3]]), {
'Recall@1': 0.5,
'Recall@5': 0.75,
'Recall@10': 1.0
}))
def test_image_to_text_retrieval_eval(self, dist_matrix: np.ndarray,
expected: Mapping[str, float]):
"""Checks `image_to_text_retrieval_eval`.
Args:
dist_matrix: Distance matrix between image (rows) and text (columns).
expected: Expected eval results.
"""
self.assertEqual(
image_text_retrieval.image_to_text_retrieval_eval(
dist_matrix, [0, 0, 1, 1, 2, 2, 3, 3]), expected)
@parameterized.parameters(
(np.array([[0.0, 0.0, 0.1, 0.5, 0.1, 0.2, 0.5, 0.1],
[0.5, 0.4, 0.0, 0.0, 0.4, 0.2, 0.6, 0.4],
[0.5, 0.4, 0.1, 0.5, 0.0, 0.0, 0.8, 0.3],
[0.5, 0.4, 0.1, 0.5, 0.3, 0.2, 0.0, 0.0]]), {
'Recall@1': 1.0,
'Recall@5': 1.0,
'Recall@10': 1.0
}), #
(np.array([[0.8, 0.8, 0.1, 0.5, 0.1, 0.2, 0.1, 0.1],
[0.5, 0.4, 0.0, 0.0, 0.4, 0.2, 0.6, 0.4],
[0.5, 0.4, 0.1, 0.5, 0.0, 0.8, 0.8, 0.3],
[0.5, 0.4, 0.1, 0.5, 0.4, 0.2, 0.3, 0.3]]), {
'Recall@1': 0.375,
'Recall@5': 1.0,
'Recall@10': 1.0
}))
def test_image_text_retrieval(self, dist_matrix: np.ndarray,
expected: Mapping[str, float]):
"""Checks `text_to_image_retrieval_eval`.
Args:
dist_matrix: Distance matrix between image (rows) and text (columns).
expected: Expected eval results.
"""
self.assertEqual(
image_text_retrieval.text_to_image_retrieval_eval(
dist_matrix, [0, 0, 1, 1, 2, 2, 3, 3]), expected)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for prompt_engineering."""
from absl.testing import absltest
from big_vision.evaluators.proj.image_text import prompt_engineering
class PromptEngineeringTest(absltest.TestCase):
def test_canonicalize(self):
self.assertEqual(prompt_engineering._canonicalize("test_test"), "test test")
self.assertEqual(
prompt_engineering._canonicalize("test___test"), "test test")
self.assertEqual(prompt_engineering._canonicalize("test"), "test")
self.assertEqual(prompt_engineering._canonicalize("test."), "test")
self.assertEqual(prompt_engineering._canonicalize(" test "), "test")
self.assertEqual(
prompt_engineering._canonicalize("test\ntest"), "test test")
self.assertEqual(
prompt_engineering._canonicalize("test test"), "test test")
self.assertEqual(prompt_engineering._canonicalize("test {}"), "test")
self.assertEqual(
prompt_engineering._canonicalize(
"test {}", keep_punctuation_exact_string="{}"), "test {}")
self.assertEqual(
prompt_engineering._canonicalize(
" test {}...", keep_punctuation_exact_string="{}"), "test {}")
self.assertEqual(
prompt_engineering._canonicalize(
"test {} {} {}", keep_punctuation_exact_string="{}"),
"test {} {} {}")
if __name__ == "__main__":
absltest.main()
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator for the contrastive task.
DON'T COMPARE ACROSS RUNS, use for training health monitoring only.
Note that this evaluator's `ncorrect_minibatch` is only a rough proxy for
training progress and does not report the actual `ncorrect`: when the same
labels are found multiple times in a batch, the reported value is biased
towards lower values.
Also note that the `ncorrect_minibatch` is a function of batch size (it's a lot
easier to find correct values in small batches).
"""
import functools
from big_vision import input_pipeline
import big_vision.datasets.core as ds_core
import big_vision.pp.builder as pp_builder
import big_vision.utils as u
import jax
import jax.numpy as jnp
import numpy as np
def _all_gather(z):
"""All gather and flatten first two dims."""
gather_flat = lambda x: jnp.concatenate(jax.lax.all_gather(x, "batch"), 0)
return jax.tree_map(gather_flat, z)
# To avoid re-compiling the function for every new instance of the same
# evaluator on a different dataset!
@functools.lru_cache(None)
def get_eval_fn(predict_fn, use_global_batch):
"""Produces eval function, also applies pmap."""
@functools.partial(jax.pmap, axis_name="batch")
def _eval_fn(params, images, labels, mask):
zimg, ztxt, extras = predict_fn(params, images, labels)
if use_global_batch:
zimg, ztxt, mask = _all_gather((zimg, ztxt, mask))
# Temperature won't affect ranking for accuracy, but impacts loss magnitude.
losses, measurements = u.bidirectional_contrastive_loss(
zimg, ztxt, extras["t"], mask, reduction=False)
l = jax.lax.psum(losses * mask, axis_name="batch")
c = jax.lax.psum(measurements["ncorrect"] * mask, axis_name="batch")
n = jax.lax.psum(mask, axis_name="batch")
return c, l, n
return _eval_fn
class Evaluator:
"""Contrastive evaluator."""
def __init__(self, predict_fn, data, pp_fn, batch_size,
use_global_batch, cache_final=True,
cache_raw=False, prefetch=1, label_key="labels"):
data = ds_core.get(**data)
pp_fn = pp_builder.get_preprocess_fn(pp_fn)
self.ds, self.steps = input_pipeline.make_for_inference(
data.get_tfdata(ordered=True), pp_fn, batch_size,
num_ex_per_process=data.num_examples_per_process(),
cache_final=cache_final, cache_raw=cache_raw)
self.data_iter = input_pipeline.start_input_pipeline(self.ds, prefetch)
self.eval_fn = get_eval_fn(predict_fn, use_global_batch)
self.label_key = label_key
def run(self, params):
"""Computes all metrics."""
l, c, nseen = 0, 0, 0
for _, batch in zip(range(self.steps), self.data_iter):
labels, mask = batch.pop(self.label_key), batch.pop("_mask")
batch_ncorrect, batch_losses, batch_n = self.eval_fn(
params, batch["image"], labels, mask)
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
c += np.sum(np.array(batch_ncorrect[0]))
l += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
yield ("ncorrect_minibatch", c / nseen)
yield ("loss", l / nseen)
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates image-text retrieval results."""
from typing import List, Mapping
import numpy as np
RECALL_THRESHOLDS = (1, 5, 10)
def text_to_image_retrieval_eval(
dist_matrix: np.ndarray,
text_image_correspondence: List[int]) -> Mapping[str, float]:
"""Runs the text-to-image retrieval eval from the distance matrix.
Args:
dist_matrix: Distance matrix between text and image embeddings (shape
N_IMAGES x N_TEXTS).
text_image_correspondence: Mapping between rows and columns of
`dist_matrix`, that is, a list of N_TEXTS integers n_i that represent that
the text embedding in column i corresponds to the image embedding in row
n_i. Please note that many texts can be assigned to the same image. For
instance, if we have 2 images and 4 texts (i.e. dist_matrix is 2x4), then
`text_image_correspondence = [0, 0, 1, 1]` means that the two first texts
correspond to the first image and the two last texts to the second image.
Returns:
A dictionary with the Recall@k scores for k in RECALL_THRESHOLDS.
"""
per_text_ranks = dist_matrix.argsort(axis=0)
text_image_correspondence = np.array(text_image_correspondence)
def recall_at(k):
wins = per_text_ranks[:k, :] == text_image_correspondence[None]
return wins.any(axis=0).mean()
return {
f'Recall@{k}': recall_at(k)
for k in RECALL_THRESHOLDS
}
def image_to_text_retrieval_eval(
dist_matrix: np.ndarray,
text_image_correspondence: List[int]) -> Mapping[str, float]:
"""Runs the image-to-text retrieval eval from the distance matrix.
Args:
dist_matrix: Distance matrix between text and image embeddings (shape
N_IMAGES x N_TEXTS).
text_image_correspondence: Mapping between rows and columns of
`dist_matrix`, that is, a list of N_TEXTS integers n_i that represent that
the text embedding in column i corresponds to the image embedding in row
n_i. Please note that many texts can be assigned to the same image. For
instance, if we have 2 images and 4 texts (i.e. dist_matrix is 2x4), then
`text_image_correspondence = [0, 0, 1, 1]` means that the two first texts
correspond to the first image and the two last texts to the second image.
Returns:
A dictionary with the Recall@k scores for k in RECALL_THRESHOLDS.
"""
per_image_ranks = dist_matrix.argsort(axis=1)
text_image_correspondence = np.array(text_image_correspondence)
def recall_at(k):
top_k_images = text_image_correspondence[per_image_ranks[:, :k]]
wins = top_k_images == np.arange(len(per_image_ranks))[:, None]
return wins.any(axis=1).mean()
return {
f'Recall@{k}': recall_at(k)
for k in RECALL_THRESHOLDS
}
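# Usage sketch (illustrative; mirrors the unit tests elsewhere in this repo):
# 2 images x 4 texts, where the first two texts belong to image 0 and the
# last two to image 1.
#
#   dist = np.array([[0.1, 0.2, 0.9, 0.8],
#                    [0.9, 0.8, 0.1, 0.2]])
#   text_to_image_retrieval_eval(dist, [0, 0, 1, 1])
#   # -> {'Recall@1': 1.0, 'Recall@5': 1.0, 'Recall@10': 1.0}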
|
# Copyright 2022 Big Vision Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-host image->text and text->image retrieval evaluation.
Example how to add to config:
  config.evals = {}
  config.evals.retrieval = dict(log_steps=1200, type='proj.image_text.retrieval')
config.evals.retrieval.dataset = 'coco_captions'
config.evals.retrieval.txt_name = ('captions', 'text')
# Note that initial "decode|" is not needed.
config.evals.retrieval.pp_img = 'resize(224)|value_range(-1,1)'
# Raw text strings use key "texts" in feature dict. The evaluator expects
# tokenized text with key "labels".
config.evals.retrieval.pp_txt = (
'tokenize(max_len=16, eos="sticky", pad_value=1, inkey="texts", '
' outkey="labels")')
Example to support precomputed data:
See `big_vision/configs/proj/image_text/lit.py`.
"""
import functools
import operator
import time
from absl import logging
from big_vision.evaluators.proj.image_text import image_text_retrieval
import big_vision.pp.builder as pp_builder
from clu import deterministic_data
import jax
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def _with_infinite_padding(dataset):
"""Adds "infinite padding" to the dataset."""
filler_element = tf.nest.map_structure(
lambda spec: tf.zeros(spec.shape, spec.dtype)[None], dataset.element_spec)
filler_element["mask"] = [False]
filler_dataset = tf.data.Dataset.from_tensor_slices(filler_element)
dataset = dataset.map(
lambda features: dict(mask=True, **features),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.concatenate(filler_dataset.repeat(None))
def _pad_and_batch(dataset, batch_dims):
"""Adds padding and then batches dataset."""
dataset = _with_infinite_padding(dataset)
for batch_size in reversed(batch_dims):
dataset = dataset.batch(batch_size)
return dataset
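# Illustrative sketch of what the padding above produces (not from the
# original file): a finite dataset becomes an infinite one where real
# examples carry mask=True and all-zero fillers carry mask=False, so all
# processes can iterate in lock-step until every one of them sees only
# padding.
#
#   ds = tf.data.Dataset.from_tensor_slices({'x': tf.range(3)})
#   batches = _pad_and_batch(ds, batch_dims=[2])
#   # -> {'x': [0, 1], 'mask': [True, True]},
#   #    {'x': [2, 0], 'mask': [True, False]},
#   #    {'x': [0, 0], 'mask': [False, False]}, ... (repeats forever)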
# This is needed so retrieval_test can replace dataset info.
def _get_dataset_info(builder):
return builder.info
def prepare_datasets(dataset, *, pp_img, pp_txt, txt_name, offset=0):
"""Returns unbatched `ds_images, ds_texts` datasets.
Args:
dataset: An image-text `tf.data.Dataset` that is expected to contain the
following features: "image" (dtype=uint8, shape=[None, None, 3]),
`txt_name` (dtype=string, shape=[None]).
pp_img: String defining pre-processing for images. The pre-processing can
expect the following features to be prepared: "image", "id". The
pre-processing should convert the "image" (dtype=uint8,
shape=[None, None, 3]) to "image" (dtype=float32, shape=[sz, sz, 3]).
pp_txt: String defining pre-processing for text. The pre-processing can
      expect the following features to be prepared: "texts", "id", "caption_i".
The pre-processing should convert the "texts" (dtype=string, shape=[])
into a tokenized "labels" (dtype=int32, shape=[max_len]).
txt_name: Name of the text feature to unroll in the original `dataset`. Can
be a simple string feature name, or an iterable of strings to specify a
nested feature (e.g. for "coco_captions", this would be
`('captions', 'text')`).
offset: Offset that should be added to enumerated examples to generate IDs.
In a multi-host setup, this is typically set to a value large enough to
make all IDs distinct.
Returns:
Image and text datasets.
"""
def get_feature_value(data, feature_name):
if isinstance(feature_name, str):
feature_name = [feature_name]
return functools.reduce(operator.getitem, feature_name, data)
def get_captions(idx, features):
"""Returns a dataset with unrolled "caption" for every example."""
texts = get_feature_value(features, txt_name)
texts_n = tf.shape(texts)[0]
return tf.data.Dataset.from_tensor_slices({
"id": tf.tile([idx + offset], [texts_n]),
"caption_i": tf.stack(tf.range(texts_n)),
"texts": tf.stack(texts),
})
def add_id(idx, features):
return {**features, "id": idx + offset}
ds_images = dataset.enumerate().map(add_id).map(
pp_builder.get_preprocess_fn(f"{pp_img}|keep('id', 'image')"))
ds_texts = dataset.enumerate().flat_map(get_captions).map(
pp_builder.get_preprocess_fn(
f"{pp_txt}|keep('id', 'caption_i', 'labels')"))
return ds_images, ds_texts
def _split_and_batch(dataset_name, batch_size, split, get_ds, data_dir=None):
"""Splits dataset, calls `get_ds` and returns padded + batched datasets."""
assert not batch_size % jax.device_count(), (
f"batch_size={batch_size} % jax.device_count()={jax.device_count()}")
builder = tfds.builder(dataset_name, data_dir=data_dir)
batch_dims = [
jax.local_device_count(), batch_size // jax.device_count()
]
info = _get_dataset_info(builder)
num_examples = info.splits[split].num_examples
read_instruction = deterministic_data.get_read_instruction_for_host(
split=split,
dataset_info=info,
remainder_options=deterministic_data.RemainderOptions.ON_FIRST_PROCESS)
ds_images, ds_texts = get_ds(builder.as_dataset(split=read_instruction),
offset=jax.process_index() * num_examples)
return (
_pad_and_batch(ds_images, batch_dims),
_pad_and_batch(ds_texts, batch_dims),
)
class Evaluator:
"""Image/text retrieval evaluator."""
def __init__(self,
predict_fn,
*,
dataset,
pp_img,
pp_txt,
txt_name,
batch_size,
data_dir=None,
split="test"):
"""Initializes a new zero-shot image/text retrieval evaluator.
See `prepare_datasets()` for details on how the dataset is pre-processed.
Args:
predict_fn: Prediction function with signature
`zimg, ztxt, out = predict_fn(params, images, texts)`
dataset: The TFDS dataset name of the eval data.
pp_img: Preprocessing string for images. Preprocessed features should
contain key "image" with value that can be batched and is suitable for
        `predict_fn(images)` input.
pp_txt: Preprocessing string for texts. Can expect "texts" key as an input
(shape=[], dtype=string), and is expected to produce "labels" key that
is suitable for `predict_fn(texts)` input.
txt_name: The name of the feature of captions (can be a tuple to look up a
value in a nested feature dictionary). Expected shape=[None],
        dtype=string. If a tuple is specified, its items are used as a lookup path.
batch_size: Global batch size.
data_dir: Optional dir to load the TFDS dataset from.
split: The split of the eval data.
"""
self.ds_images, self.ds_texts = _split_and_batch(
dataset, batch_size, split,
functools.partial(
prepare_datasets, pp_img=pp_img, pp_txt=pp_txt, txt_name=txt_name),
data_dir=data_dir)
self._axis_name = "batch"
def embed_images(params, images):
zimg, _, _ = predict_fn(params, images, None)
return jax.lax.all_gather(zimg, axis_name=self._axis_name)
def embed_texts(params, texts):
_, ztxt, _ = predict_fn(params, None, texts)
return jax.lax.all_gather(ztxt, axis_name=self._axis_name)
self._embed_images_p = jax.pmap(embed_images, axis_name=self._axis_name)
self._embed_texts_p = jax.pmap(embed_texts, axis_name=self._axis_name)
self._all_gather_p = jax.pmap(
lambda x: jax.lax.all_gather(x, axis_name=self._axis_name),
axis_name=self._axis_name)
self._count_p = jax.pmap(
lambda mask: jax.lax.psum(mask.sum(), axis_name=self._axis_name),
axis_name=self._axis_name)
self._compiled = set()
def _embed(self, name, params, ds, embed_fn, id_names):
"""Embeds features name `name` using `embed_fn`.
Args:
name: Feature name to be embedded.
params: Parameters for the predict_fn.
ds: The dataset.
embed_fn: A pmapped function that returns the embeddings.
id_names: An iterable of feature names that should be collected.
Returns:
A dictionary with "embeddings" and `id_names` as keys.
"""
ns = []
embeddings = []
ids = {id_name: [] for id_name in list(id_names) + ["mask"]}
t0 = time.time()
for batch in ds:
ns.append(self._count_p(np.asarray(memoryview(batch["mask"])))[0])
# Due to infinite padding, this loop will never end. We will stop once
# all processes only process padded data. We don't check the latest
      # DeviceArray `ns[-1]` because we want to keep our computation async for
# efficiency reasons.
if len(ns) >= 2 and ns[-2] == 0:
break
embs = embed_fn(params, np.asarray(memoryview(batch[name])))[0]
if embed_fn not in self._compiled:
logging.info("Compiled %s embeddings in %.3fs", name, time.time() - t0)
t0 = time.time()
self._compiled.add(embed_fn)
embeddings.append(embs.reshape([-1, embs.shape[-1]]))
for id_name in ids:
ids[id_name].append(
self._all_gather_p(np.array(batch[id_name]))[0].flatten())
# Only access DeviceArray at end of loop for better efficiency.
ns = np.array(ns)
embeddings = np.concatenate(embeddings)
ids = {k: np.concatenate(v) for k, v in ids.items()}
masks = ids.pop("mask").astype(bool)
logging.info("Processed %s in %d steps - ...%s", name, len(ns), ns[-10:])
n = ns.sum()
logging.info("Totalling %d %s in %.3fs", n, name, time.time() - t0)
return {
"embeddings": embeddings[masks],
**{k: v[masks] for k, v in ids.items()},
}
def evaluate(self, params):
"""Returns evaluation results."""
images = self._embed("image", params, self.ds_images, self._embed_images_p,
("id",))
texts = self._embed("labels", params, self.ds_texts, self._embed_texts_p,
("id", "caption_i"))
# Shapes: (nimg, emb) * (emb, ntxt) -> (nimg, ntxt)
similarities = np.dot(images["embeddings"], texts["embeddings"].T)
t0 = time.time()
id2img = {id_: i for i, id_ in enumerate(images["id"])}
text_image_correspondence = [id2img[id_] for id_ in texts["id"]]
img2txt = image_text_retrieval.image_to_text_retrieval_eval(
-similarities, text_image_correspondence)
txt2img = image_text_retrieval.text_to_image_retrieval_eval(
-similarities, text_image_correspondence)
logging.info("Computed retrieval metrics in %.3fs", time.time() - t0)
return dict(
images=images,
texts=texts,
img2txt=img2txt,
txt2img=txt2img,
)
def run(self, params):
"""Returns metrics."""
results = self.evaluate(params)
return [(f"{direction}_{k.lower()}", v)
for direction in ("img2txt", "txt2img")
for k, v in results[direction].items()]
|