| seed (string, 25–1.88k chars) | seed_api (string, 14–102 chars) | index (int64, 0–1.05k) |
|---|---|---|
import tensorflow as tf
encoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
encoder_out, encoder_state = tf.nn.dynamic_rnn(
cell=encoder_cells, inputs=forward, sequence_length=seq_lens, dtype=tf.float32
)
encoder_state = tuple(encoder_state[-1] for _ in range(num_layers))
decoder_cell = attention(encoder_out, seq_lens)
dense_layer = tf.layers.Dense(n_mels * resampled)
training_helper = tf.contrib.seq2seq.TrainingHelper(
inputs=self.decoder_inputs, sequence_length=seq_lens, time_major=False
)
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell=decoder_cell,
helper=training_helper,
initial_state=decoder_cell.zero_state(batch_size, tf.float32).clone(
cell_state=encoder_state
),
output_layer=dense_layer,
| tensorflow.contrib.seq2seq.TrainingHelper | 200 |
import tensorflow as tf
while True:
random.shuffle(train_examples)
for example in train_examples:
tensorized_example = self.tensorize_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return np.zeros([0, 0, self.lm_size, self.lm_layers])
file_key = doc_key.replace("/", ":")
group = self.lm_file[file_key]
| tensorflow.global_variables | 201 |
from tensorflow.python.ops import array_ops
init_shape = [init_size] + fixed_shape
array = _create_local(
'array', shape=init_shape, validate_shape=False, dtype=values.dtype)
size = _create_local('size', shape=[], dtype=dtypes.int32)
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
| tensorflow.python.ops.array_ops.shape | 202 |
import tensorflow as tf
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))
else:
for pred_ind in list(range(len(pred_outputs))):
mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind],
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(pred_ind),
loss_collection=None,#tf.GraphKeys.LOSSES,
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements
mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss')
tf.summary.scalar('mse', mse_loss)
tf.losses.add_loss(mse_loss)
# bce_loss_list = []
# for pred_ind in list(range(len(pred_outputs))):
# bce_loss_list.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred_outputs[pred_ind], labels=targets_list[pred_ind]/255., name='loss_{}'.format(pred_ind)), name='loss_mean_{}'.format(pred_ind)))
# mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss')
# tf.summary.scalar('mse', mse_loss)
# tf.losses.add_loss(mse_loss)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
| tensorflow.losses.add_loss | 203 |
from tensorflow.python.platform import tf_logging as logging
if np.isnan(_extract_output(outputs, self._loss_tensor)):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we return "should stop" so we stop, but
# without an exception.
return True
| tensorflow.python.platform.tf_logging.warning | 204 |
import tensorflow as tf
nodes: A list of N + 1 `tf.Tensor` of `int64`, where N is the number of
hops. Specifies the node set of each hop, including the root.
adjcents: A list of N `tf.SparseTensor` of `int64`. Specifies the
adjacency matrix between hops.
"""
nodes = tf.reshape(nodes, [-1])
nodes_list = [nodes]
adj_list = []
for hop_edge_types in edge_types:
neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = [tf.size(nodes), tf.size(next_nodes)]
next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape)
next_adj = tf.sparse.reorder(next_adj)
nodes_list.append(next_nodes)
adj_list.append(next_adj)
nodes = next_nodes
return nodes_list, adj_list
| tensorflow.sparse.reorder | 205 |
from tensorflow.python.ops import variables as vars_
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
| tensorflow.python.ops.variables.trainable_variables | 206 |
import tensorflow as tf
def getinputs(path):
filename_queue=tf.train.string_input_producer([path])
| tensorflow.train.string_input_producer | 207 |
import tensorflow as tf
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
| tensorflow.logical_or | 208 |
import tensorflow as tf
"""
L= len(activation) #number of layers
m = Y.shape[1] #number of training examples
last = activation[L-1]
labels= tf.transpose(Y)
if last == 'sigmoid' or last == 'softmax': #use cross entropy loss function
logits= tf.transpose(betan*zn[1])
cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits = logits, multi_class_labels=labels))
elif last == 'esp' or last == 'relu': #use minimum squared error (L2 loss)
out = tf.transpose(zn[0])
cost = tf.reduce_mean(tf.squared_difference(out, labels))/2
return cost
#------------Hessian-------------------
def flatten(tensor):
| tensorflow.losses.sigmoid_cross_entropy | 209 |
import tensorflow as tf
# Adds a set of collections.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("float_collection", 3.5)
tf.add_to_collection("string_collection", "hello")
tf.add_to_collection("variable_collection", v0)
# Add QueueRunners.
tf.train.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
tf.add_to_collection("user_defined_string_collection", str(queue_runner))
tf.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
| tensorflow.train.add_queue_runner | 210 |
from tensorflow.contrib.eager.python import tfe
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| tensorflow.contrib.eager.python.tfe.run | 211 |
from tensorflow.python.framework import ops
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))  # Generates [6, 9, 12]; the limit value is not included
sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))
rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4)
print(sess.run(rnorm_var))
print(sess.run(runif_var))
ops.reset_default_graph()
sess = tf.Session()
my_var = tf.Variable(tf.zeros([1,20]))
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", graph=sess.graph)
initialize_op = tf.global_variables_initializer()
sess.run(initialize_op)
| tensorflow.python.framework.ops.reset_default_graph | 212 |
from tensorflow.python.framework import tensor_shape
input_shape = input_shape.merge_with(out_backprop_shape)
vector_dim = input_shape[3]
vector_dim = vector_dim.merge_with(mean_shape[0])
vector_dim = vector_dim.merge_with(var_shape[0])
vector_dim = vector_dim.merge_with(beta_shape[0])
return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)
ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
| tensorflow.python.framework.tensor_shape.vector | 213 |
import tensorflow as tf
dtype=tf.float32):
"""Returns a input_receiver_fn for raw images during serving."""
def _preprocess_image(encoded_image):
"""Preprocess a single raw image."""
image = tf.image.decode_image(encoded_image, channels=shape[-1])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
| tensorflow.image.decode_image | 214 |
import tensorflow as tf
"""Verbosity level for summary ops. Pass 0 to disable
both summaries and checkpoints.""")
tf.flags.DEFINE_integer('save_summaries_steps', 0,
"""How often to save summaries for trained models.
Pass 0 to disable summaries.""")
tf.flags.DEFINE_integer('save_model_secs', 0,
"""How often to save trained models. Pass 0 to disable
checkpoints""")
tf.flags.DEFINE_string('train_dir', None,
"""Path to session checkpoints.""")
| tensorflow.flags.DEFINE_integer | 215 |
import tensorflow as tf
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
| tensorflow.train.start_queue_runners | 216 |
from tensorflow.contrib.learn.python.learn.estimators import test_data
iris.data[:, i], dtype=dtypes.float32), (-1, 1))
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=('en', 'fr', 'zh'),
indices=((0, 0), (0, 1), (60, 0)),
dense_shape=(len(iris.target), 2))
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), (-1, 1))
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(
cont_features[i],
test_data.get_quantile_based_buckets(iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
| tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression | 217 |
import tensorflow as tf
'swish':swish,
'gelu':gelu
}
lr_schedules = {
'warmup_cosine':warmup_cosine,
'warmup_linear':warmup_linear,
'warmup_constant':warmup_constant,
}
def _norm(x, g=None, b=None, e=1e-5, axis=[1]):
u = tf.reduce_mean(x, axis=axis, keep_dims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keep_dims=True)
x = (x - u) * tf.rsqrt(s + e)
if g is not None and b is not None:
x = x*g + b
return x
def norm(x, scope, axis=[-1]):
with tf.variable_scope(scope):
n_state = shape_list(x)[-1]
g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
return _norm(x, g, b, axis=axis)
def dropout(x, pdrop, train):
| tensorflow.rsqrt | 218 |
import tensorflow as tf
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: # tensorflow version < 0.12
writer = tf.train.SummaryWriter('logs/', sess.graph)
| tensorflow.train.SummaryWriter | 219 |
import tensorflow as tf
if mode != 'gen':
targets = tf.reshape(targets_nunroll, shape=[batch_size * rnn_nunroll])
target_weights = tf.reshape(target_weights_nunroll, shape=[batch_size * rnn_nunroll])
# CNN
cnn_output = feats_audio
if do_cnn:
layer_last = feats_audio
nfilt_last = audio_nchannels
for i, ((ntime, nband, nfilt), (ptime, pband)) in enumerate(zip(cnn_filter_shapes, cnn_pool)):
layer_name = 'cnn_{}'.format(i)
with tf.variable_scope(layer_name):
filters = tf.get_variable('filters', [ntime, nband, nfilt_last, nfilt], initializer=cnn_init, dtype=dtype)
biases = tf.get_variable('biases', [nfilt], initializer=tf.constant_initializer(0.1), dtype=dtype)
if cnn_rnn_zack:
padding = 'SAME'
else:
padding = 'VALID'
conv = tf.nn.conv2d(layer_last, filters, [1, 1, 1, 1], padding=padding)
biased = tf.nn.bias_add(conv, biases)
convolved = tf.nn.relu(biased)
pool_shape = [1, ptime, pband, 1]
pooled = tf.nn.max_pool(convolved, ksize=pool_shape, strides=pool_shape, padding='SAME')
print('{}: {}'.format(layer_name, pooled.get_shape()))
| tensorflow.constant_initializer | 220 |
import tensorflow as tf
dataset.training_X, dataset.training_y, dataset.validation_X, dataset.validation_y
if not os.path.exists(weights_dir):
os.mkdir(weights_dir)
if not os.path.exists(weights_dir + '/best_models'):
os.mkdir(weights_dir + '/best_models')
# Create a saver.
saver = tf.train.Saver(max_to_keep=None)
if self.is_summary:
training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES)
validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES)
validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(
| tensorflow.merge_all_summaries | 221 |
import tensorflow as tf
dual_variable: The underlying variable itself.
"""
# We disable partitioning while constructing dual variables because they will
# be updated with assign, which is not available for partitioned variables.
partitioner = tf.get_variable_scope().partitioner
try:
tf.get_variable_scope().set_partitioner(None)
dual_variable = tf.contrib.framework.model_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
collections=collections,
trainable=trainable)
| tensorflow.contrib.framework.model_variable | 222 |
import tensorflow as tf
loop_vars=[n, result, two])
return result
def factorial(n: TensorLike) -> TensorLike:
n = tf.convert_to_tensor(value=n)
return tf.exp(tf.math.lgamma(n + 1))
def generate_l_m_permutations(
max_band: int,
name: str = "spherical_harmonics_generate_l_m_permutations") -> Tuple[TensorLike, TensorLike]:
with tf.name_scope(name):
| tensorflow.math.lgamma | 223 |
from tensorflow.python.ops import math_ops
self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda")
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)
| tensorflow.python.ops.math_ops.cast | 224 |
import tensorflow as tf
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
| tensorflow.gfile.Copy | 225 |
from tensorflow.python.ops import sparse_ops
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
| tensorflow.python.ops.sparse_ops.sparse_reshape | 226 |
from tensorflow.python.ops import init_ops
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.init_ops.constant_initializer | 227 |
from tensorflow.python.ops import gradients
"""See base class."""
global_step = variables.get_global_step()
assert global_step
loss = self._loss(
self._logits(features), targets, self._get_weight_tensor(features))
logging_ops.scalar_summary("loss", loss)
linear_vars = self._get_linear_vars()
dnn_vars = self._get_dnn_vars()
grads = gradients.gradients(loss, dnn_vars + linear_vars)
dnn_grads = grads[0:len(dnn_vars)]
linear_grads = grads[len(dnn_vars):]
train_ops = self._get_linear_training_ops(
linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads,
dnn_vars)
train_step = control_flow_ops.group(*train_ops, name="combined_training_op")
| tensorflow.python.ops.gradients.gradients | 228 |
import tensorflow as tf
block_conv_input = bottom
input_filter = bottom.get_shape().as_list()[-1]
block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_1 = tf.nn.relu(block_norm_1)
block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_2 = tf.nn.relu(block_norm_2)
block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
block_res = tf.add(block_conv_input, block_conv_3)
relu = tf.nn.relu(block_res)
return relu
def avg_pool(self, bottom, kernal_size = 2, stride = 2, name = "avg"):
return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='VALID', name=name)
def max_pool(self, bottom, kernal_size = 2, stride = 2, name = "max"):
return tf.nn.max_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='SAME', name=name)
def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
| tensorflow.nn.relu | 229 |
import tensorflow as tf
var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'is not finite'):
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
class TeacherTask(base_model.BaseTask):
@base_layer.initializer
def __init__(self, params):
super(TeacherTask, self).__init__(params)
p = self.params
with tf.variable_scope(p.name):
self.CreateVariable('x',
| tensorflow.is_finite | 230 |
import tensorflow as tf
#
# c = tf.constant('haHa')
# print(sess.run(c))
#
# sess.close()
identity_matrix = tf.diag([1.0, 3.0, 1.0])
A = tf.truncated_normal([2, 3])
B = tf.fill([2, 3], 5.0)
C = tf.random_uniform([3, 2], maxval=100)
D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))
sess = tf.Session()
# sess.run(tf.global_variables_initializer())
| tensorflow.diag | 231 |
import tensorflow as tf
return tuple(restored)
def import_ops(self):
if self._is_training:
self._train_op = tf.get_collection_ref('train_op')[0]
self._lr = tf.get_collection_ref('lr')[0]
self._new_lr = tf.get_collection_ref('new_lr')[0]
self._lr_update = tf.get_collection_ref('lr_update')[0]
rnn_params = tf.get_collection_ref('rnn_params')
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
| tensorflow.get_collection_ref | 232 |
from tensorflow.python.training import training as train
* `gradients` is empty.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = train.get_global_step()
else:
train.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
# Make sure update ops are ran before computing loss.
if update_ops:
| tensorflow.python.training.training.assert_global_step | 233 |
import tensorflow as tf
if monitorSession:
# MonitoredSession
# this will restore all the variables from the latest checkpoint if it exists
self._fix_checkpoint_abs_to_rel(self._checkpoint_dir) # need to ensure checkpoint has relative path saved
chiefsess_creator = tf.train.ChiefSessionCreator(config=sess_config, checkpoint_dir=self._checkpoint_dir)
if self._restore_chkptfile is not None:
self._network.init_saver()
# this is restoring variables
| tensorflow.train.ChiefSessionCreator | 234 |
import tensorflow as tf
kk = tf.Variable(0, dtype=tf.int64)
for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
to_add = tf.cond(
tf.greater(vz.lookup(vx_keys[i]), -1),
true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
false_fn=lambda: tf.constant(0, dtype=tf.int64)
)
kk = tf.math.add(kk, to_add)
kernel[l][m] = kk
return tf.convert_to_tensor(kernel, dtype=tf.int64)
def dim(self):
return self._dim
| tensorflow.math.add | 235 |
import tensorflow as tf
def _get_features_dict(input_dict):
"""Extracts features dict from input dict."""
source_id = _replace_empty_string_with_random_number(
input_dict[fields.InputDataFields.source_id])
hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
features = {
fields.InputDataFields.image:
input_dict[fields.InputDataFields.image],
HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
fields.InputDataFields.true_image_shape:
| tensorflow.string_to_hash_bucket_fast | 236 |
from tensorflow.python.ops import parsing_ops
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value='jpeg'),
'image/class/label':
parsing_ops.FixedLenFeature(
shape=[1],
dtype=dtypes.int64,
default_value=array_ops.zeros(
[1], dtype=dtypes.int64))
}
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
| tensorflow.python.ops.parsing_ops.FixedLenFeature | 237 |
from tensorflow.python.ops import array_ops
def _move_tensors(tensors, device):
"""Moves a list of tensors to a device by concatenating/splitting them."""
# Reset the device setting to avoid weird interactions with device merging
# logic.
with ops.device(None):
if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
with ops.device(tensors[0].device):
values = array_ops.stack(tensors)
with ops.device(device):
return array_ops.unstack(values)
else:
with ops.device(tensors[0].device):
sizes = array_ops.stack(
[array_ops.shape(tensor)[0] for tensor in tensors])
values = array_ops.concat(tensors, axis=0)
| tensorflow.python.ops.array_ops.stack | 238 |
import tensorflow as tf
graph_def = tf.get_default_graph().as_graph_def()
self.default_encoding_stage()
new_graph_def = tf.get_default_graph().as_graph_def()
tf.test.assert_equal_graph_def(graph_def, new_graph_def)
def test_encoding_stage_name(self):
| tensorflow.test.assert_equal_graph_def | 239 |
from tensorflow.python.ops import math_ops
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index],
tn[tf_index] + fp[tf_index] + kepsilon,
| tensorflow.python.ops.math_ops.argmax | 240 |
import tensorflow as tf
row_splits=[0, 4, 4, 7, 8, 8]),
}
with tf.compat.v1.Session(graph=graph) as session:
schema = schema_inference.infer_feature_schema(outputs, graph, session)
| tensorflow.compat.v1.Session | 241 |
import tensorflow as tf
sess.run(update_fp_false,
{holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
dec_inp_fp_false)})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
| tensorflow.nn.rnn_cell.BasicLSTMCell | 242 |
import tensorflow as tf
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
| tensorflow.constant | 243 |
import tensorflow as tf
else:
dataset_path = os.path.join(dataset_path, 'dev.json')
# Opening with GFile allows to use remotely stored files, e.g.
# in a gs bucket.
dataset_handle = tf.io.gfile.GFile(dataset_path, 'r')
dataset = json.load(dataset_handle)
def mathqa_yield_examples(generator=None):
| tensorflow.io.gfile.GFile | 244 |
import tensorflow as tf
num_items=self.train_set.num_items, emb_size=self.num_factors,
reg_user=self.regs[0], reg_item=self.regs[1], seed=self.seed)
logits = tf.layers.dense(self.interaction, units=1, name='logits',
kernel_initializer=tf.initializers.lecun_uniform(self.seed))
self.prediction = tf.nn.sigmoid(logits)
self.loss = loss_fn(labels=self.labels, logits=logits)
| tensorflow.initializers.lecun_uniform | 245 |
import tensorflow as tf
def _smooth_l1(y_true, y_pred):
# y_true [batch_size, num_anchor, 4+1]
# y_pred [batch_size, num_anchor, 4]
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# find the positive samples
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute the smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = tf.where(
| tensorflow.gather_nd | 246 |
import tensorflow as tf
output = tf.add_n([
w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1,
w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1,
w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1,
w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1
])
return output
def _meshgrid(depth, height, width, z_near, z_far):
with tf.variable_scope('_meshgrid'):
x_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
[depth, height, width])
y_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
[depth, width, height])
y_t = tf.transpose(y_t, [0, 2, 1])
sample_grid = tf.tile(
tf.linspace(float(z_near), float(z_far), depth), [width * height])
z_t = tf.reshape(sample_grid, [height, width, depth])
z_t = tf.transpose(z_t, [2, 0, 1])
z_t = 1 / z_t
| tensorflow.linspace | 247 |
import tensorflow as tf
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
_, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
| tensorflow.nn.seq2seq.embedding_rnn_decoder | 248 |
import tensorflow as tf
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_v3/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 7200,
'The frequency with which the model is saved, in seconds.')
| tensorflow.app.flags.DEFINE_string | 249 |
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
# To be defunnable, the function cannot return an Operation, so the above
# function is used for defun or eager, and this function is used in graph to be
# able to run the gradient updates.
def graph_step(dynamics, optimizer, samples):
loss, grads, samples, _ = l2hmc.loss_and_grads(
dynamics, samples, loss_fn=l2hmc.compute_loss)
train_op = optimizer.apply_gradients(zip(grads, dynamics.variables))
return train_op, loss, samples
| tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.loss_and_grads | 250 |
import tensorflow as tf
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
| tensorflow.nn.rnn_cell.GRUCell | 251 |
import tensorflow as tf
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return self.conv2d(_t)
| tensorflow.image.resize_nearest_neighbor | 252 |
import tensorflow as tf
for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta):
# Initialize config
embedding = config.embeddings.add()
# Specify the embedding variable and the metadata
embedding.tensor_name = embedding_name
embedding.metadata_path = path_to_meta
# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
| tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings | 253 |
import tensorflow as tf
in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"INPUT0")
in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"INPUT1")
else:
in0 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "INPUT0")
in1 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "INPUT1")
# If the input is a string, then convert each string to the
# equivalent int32 value.
if tf_input_dtype == tf.string:
in0 = tf.strings.to_number(in0, tf.int32)
in1 = tf.strings.to_number(in1, tf.int32)
add = tf.add(in0, in1, "ADD")
sub = tf.subtract(in0, in1, "SUB")
# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
| tensorflow.strings.to_number | 254 |
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
if use_tpu:
# TPU host call. Important: need to be called before remove_summaries()
if hparams.tpu_enable_host_call:
host_call = t2t_model.create_host_call(hparams.model_dir)
else:
host_call = None
t2t_model.remove_summaries()
return tpu_estimator.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
host_call=host_call,
training_hooks=[restore_hook, saver_hook])
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
| tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimatorSpec | 255 |
import tensorflow as tf
# In this notebook we demonstrate Trieste's ability to perform asynchronous Bayesian optimisation, which is suitable for scenarios where the objective function can be run for several points in parallel but observations may come back at different times. To avoid wasting resources waiting for the evaluation of the whole batch, we immediately request the next point asynchronously, taking into account points that are still being evaluated. Besides saving resources, the asynchronous approach can also potentially [improve sample efficiency](https://arxiv.org/abs/1901.10452) in comparison with synchronous batch strategies, although this is highly dependent on the use case.
#
# To contrast this approach with regular [batch optimization](batch_optimization.ipynb), this notebook also shows how to run the parallel synchronous batch approach.
# %%
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
tf.get_logger().setLevel("ERROR")
import numpy as np
import time
import timeit
# %% [markdown]
# First, let's define a simple objective that will emulate evaluations taking variable time. We will be using a classic Bayesian optimisation benchmark function [Branin](https://www.sfu.ca/~ssurjano/branin.html) with a sleep call inserted in the middle of the calculation to emulate delay. Our sleep delay is a scaled sum of all input values to make sure delays are uneven.
# %%
from trieste.objectives import scaled_branin
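# %% [markdown]
# A minimal sketch (not part of the original notebook) of the delayed objective
# described above: it evaluates `scaled_branin` point by point and, unless called
# with `sleep=False`, pauses for a time proportional to the sum of the inputs.
# The exact delay scaling and the body below are assumptions for illustration;
# only the name `objective` and its `sleep` flag appear elsewhere in the notebook.
# %%
def objective(points, sleep=True):
    observations = []
    for point in points:
        # evaluate the Branin benchmark for a single query point of shape [2]
        observation = scaled_branin(point.reshape(1, -1)).numpy()
        if sleep:
            # emulate a slow evaluation: the delay grows with the sum of the inputs
            time.sleep(float(np.sum(point)))
        observations.append(observation)
    return np.array(observations).reshape(len(points), 1)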
| tensorflow.get_logger | 256 |
import tensorflow as tf
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
| tensorflow.nn.avg_pool3d | 257 |
import tensorflow as tf
def test_default_encoding_stage(self):
"""Tests the correctness of `default_encoding_stage`."""
stage = self.default_encoding_stage()
self.assertIsInstance(stage,
(encoding_stage.EncodingStageInterface,
encoding_stage.AdaptiveEncodingStageInterface))
# Calling the method again should create a new instance.
new_stage = self.default_encoding_stage()
self.assertIsNot(stage, new_stage)
def test_encoding_stage_constructor_does_not_modify_graph(self):
"""Tests that the constructor of encoding stage does not modify graph."""
graph_def = tf.get_default_graph().as_graph_def()
self.default_encoding_stage()
new_graph_def = tf.get_default_graph().as_graph_def()
tf.test.assert_equal_graph_def(graph_def, new_graph_def)
def test_encoding_stage_name(self):
"""Tests that the `name` property returns a string."""
stage = self.default_encoding_stage()
self.assertIsInstance(stage.name, str)
def test_default_input_is_tensor_with_fully_defined_shape(self):
"""Tests that `default_input` returns a `Tesnor` of fully defined shape."""
x = self.default_input()
self.assertIsInstance(x, tf.Tensor)
self.assertTrue(x.shape.is_fully_defined())
| tensorflow.get_default_graph | 258 |
from tensorflow.python.ops import math_ops
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
| tensorflow.python.ops.math_ops.matmul | 259 |
import tensorflow as tf
return [L_, tf.transpose(L_)]
tmp = tf.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
| tensorflow.scan | 260 |
import tensorflow as tf
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
features["partial_targets"] = tf.to_int64(features["inputs"])
# Save the targets in a var and reassign it after the tf.while loop to avoid
# having targets being in a 'while' frame. This ensures targets when used
# in metric functions stays in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.target_modality
def infer_step(recent_output, recent_logits, unused_loss):
| tensorflow.to_int64 | 261 |
from tensorflow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary
min_frequency: Minimum frequency of words in the vocabulary.
vocabulary: CategoricalVocabulary object.
Attributes:
vocabulary_: CategoricalVocabulary object.
"""
self.min_frequency = min_frequency
if vocabulary:
self.vocabulary_ = vocabulary
else:
self.vocabulary_ = CategoricalVocabulary(support_reverse=True)
if tokenizer_fn:
self._tokenizer = tokenizer_fn
else:
self._tokenizer = tokenizer
def fit(self, raw_documents, unused_y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Args:
| tensorflow.contrib.learn.python.learn.preprocessing.text.CategoricalVocabulary | 262 |
from tensorflow.contrib import metrics as contrib_metrics
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Pearson correlations for STS-B."""
# Display labels and predictions
concat1 = contrib_metrics.streaming_concat(logits)
concat2 = contrib_metrics.streaming_concat(label_ids)
| tensorflow.contrib.metrics.streaming_concat | 263 |
import tensorflow as tf
def nin(x, num_units, **kwargs):
s = tf.shape(x)
sh = x.get_shape().as_list()
x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, [-1] + sh[1:-1] + [num_units])
| tensorflow.reduce_prod | 264 |
import tensorflow as tf
save_pb_at_end = config.get("save_pb", 0)
))
# summary hook
if config["save_summaries"]:
save_steps_summaries = self._get_steps(config["save_summaries_period"], self._time_reference_str)
self.set_summaries()
summary_hooks = [tf.train.SummarySaverHook(save_steps=save_steps_summaries,
output_dir=self._tensorboard_dir+sn.name,
summary_op=sn,
summary_writer=fw)
for sk in self.summary_keys for sn,fw in zip(self.summary_nodes[sk], self.summary_writers[sk])]
hooks += summary_hooks
# images input hook
| tensorflow.train.SummarySaverHook | 265 |
from tensorflow.python.ops import math_ops
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
noise_shape = noise_shape or array_ops.shape(x)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
return ret
def top_k(input, k=1, sorted=True, name=None):
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
| tensorflow.python.ops.math_ops.inv | 266 |
from tensorflow.contrib import framework as contrib_framework
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
train_op, loss_op = self._get_train_ops(features, targets)
return train(
graph=g,
output_dir=self._model_dir,
| tensorflow.contrib.framework.create_global_step | 267 |
from tensorflow.python.ops import variable_scope
# accuracy is calculated only under 'ce_train', where true answer is given
if mode_gen == 'ce_train':
accuracy = _mask_and_accuracy(vocab_scores, answer_batch, loss_weights)
return accuracy, self._loss, sampled_words
else:
return None, self._loss, sampled_words
def calculate_encoder_features(self, encoder_states, encoder_dim):
options = self.options
input_shape = tf.shape(encoder_states)
batch_size = input_shape[0]
passage_len = input_shape[1]
with variable_scope.variable_scope("attention_decoder"):
encoder_features = tf.expand_dims(encoder_states, axis=2) # now is shape [batch_size, passage_len, 1, encoder_dim]
W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size])
self.W_h = W_h
encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size]
encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
return encoder_features
def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
encoder_states, encoder_features, passage_word_idx, passage_mask):
options = self.options
with variable_scope.variable_scope("attention_decoder"):
v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
w_c = None
if options.use_coverage:
| tensorflow.python.ops.variable_scope.get_variable | 268 |
import tensorflow as tf
sequence_lengths = tf.convert_to_tensor(sequence_lengths)
| tensorflow.convert_to_tensor | 269 |
import tensorflow as tf
reduce_sum(tf.square(grad),
reduction_indices=red_ind,
keepdims=True))
normalized_grad = old_div(grad, tf.sqrt(square))
else:
normalized_grad = tf.sign(grad)
normalized_grad = tf.stop_gradient(normalized_grad)
scaled_grad = eps * normalized_grad
# the goal is to make the loss decrease
adv_x = x - scaled_grad
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
# DeepFool: only the targeted attack is implemented
def deepfool(x,
loss=None,
bounds=(0,1)):
(clip_min, clip_max)=bounds
grad, = tf.gradients(loss, x)
r=old_div(grad*loss,tf.reduce_sum(tf.square(grad)))
| tensorflow.clip_by_value | 270 |
import tensorflow as tf
vf_old, vf_old_params, _, _ = self.build_cnet(batch['state'], 'oldvf')
self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf')
self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf', reuse=True, batch_size=1)
self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
self.eval_action = pi_eval.mode()
self.global_step = tf.train.get_or_create_global_step()
self.saver = tf.train.Saver()
# Loss functions and training
epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1)
ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
ratio = tf.clip_by_value(ratio, 0, 10)
surr1 = batch['advantage'] * ratio
surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
loss = loss_pg + loss_vf + loss_entropy
opt = tf.train.AdamOptimizer(self.LR)
| tensorflow.train.polynomial_decay | 271 |
import tensorflow as tf
import tensorflow as tf
import numpy as np
import math
# weights initializers
he_normal = tf.contrib.keras.initializers.he_normal()
#he_normal = tf.contrib.layers.variance_scaling_initializer()
regularizer = tf.contrib.layers.l2_regularizer(1e-4)
def Convolutional_Block(inputs, shortcut, num_filters, name, is_training):
| tensorflow.contrib.keras.initializers.he_normal | 272 |
import tensorflow as tf
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers; both layers have the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs; the states are internal to the LSTM cells and are not our concern here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
| tensorflow.nn.rnn | 273 |
from tensorflow.contrib import layers
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
target_dimension: TODO(zakaria): dimension of the target for multilabels.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
target_column = layers.regression_target(
weight_column_name=weight_column_name,
target_dimension=target_dimension)
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
| tensorflow.contrib.layers.regression_target | 274 |
import tensorflow.contrib.slim as slim
method=1)
tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)
loss_dict = outputs[-1]
total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tower_grads.append(grads)
self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)
if __name__ == '__main__':
trainer = TrainR3DetGWD(cfgs)
trainer.main()
| tensorflow.contrib.slim.learning.clip_gradient_norms | 275 |
from tensorflow.contrib import layers
raise ValueError("n_classes should be greater than 1. Given: {}".format(
n_classes))
target_column = layers.multi_class_target(
n_classes=n_classes,
| tensorflow.contrib.layers.multi_class_target | 276 |
import tensorflow as tf
def main(unused_argv):
tf.compat.v1.enable_v2_behavior() # The trainer only runs with V2 enabled.
| tensorflow.compat.v1.enable_v2_behavior | 277 |
import tensorflow as tf
self.r: batch.r,
self.local_network.state_in[0]: batch.features[0],
self.local_network.state_in[1]: batch.features[1],
}
fetched = sess.run(fetches, feed_dict=feed_dict)
if should_compute_summary:
self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])
self.summary_writer.flush()
self.local_steps += 1
| tensorflow.Summary.FromString | 278 |
import tensorflow as tf
cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
worker_loss = -tf.reduce_sum(worker_loss, axis=0)
Am = self.r - self.manager_vf
manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))
Aw = (self.r + self.alpha * self.ri) - self.worker_vf
worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw))
entropy = -tf.reduce_sum(self.pi * self.log_pi)
beta = tf.train.polynomial_decay(beta_start, self.global_step,
end_learning_rate=beta_end,
decay_steps=decay_steps,
power=1)
# worker_loss = tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy])
self.loss = worker_loss + manager_loss + \
worker_vf_loss + manager_vf_loss - \
entropy * beta
| tensorflow.reduce_sum | 279 |
import tensorflow as tf
print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
return summary
class PPO_HC(PPO):
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
# sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
| tensorflow.layers.dense | 280 |
from tensorflow.contrib.slim.python.slim.data import dataset
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
| tensorflow.contrib.slim.python.slim.data.dataset.Dataset | 281 |
from tensorflow.contrib.data import Iterator
temp_val_data['X'][i * 2 + 1, :, :, :] = self.val_data['X'][i, :, sh[2] // 2:, :]
temp_val_data['Y'][i * 2, :, :] = self.val_data['Y'][i, :, :sh[2] // 2]
temp_val_data['Y'][i * 2 + 1, :, :] = self.val_data['Y'][i, :, sh[2] // 2:]
self.val_data = temp_val_data
def init_tfdata(self, batch_size, main_dir, resize_shape, mode='train'):
self.data_session = tf.Session()
print("Creating the iterator for training data")
with tf.device('/cpu:0'):
segdl = SegDataLoader(main_dir, batch_size, (resize_shape[0], resize_shape[1]), resize_shape,
# * 2), resize_shape,
'data/cityscapes_tfdata/train.txt')
iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes)
next_batch = iterator.get_next()
self.init_op = iterator.make_initializer(segdl.data_tr)
self.data_session.run(self.init_op)
print("Loading Validation data in memoryfor faster training..")
self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"),
'Y': np.load(self.args.data_dir + "Y_val.npy")}
# self.crop()
# import cv2
# cv2.imshow('crop1', self.val_data['X'][0,:,:,:])
# cv2.imshow('crop2', self.val_data['X'][1,:,:,:])
| tensorflow.contrib.data.Iterator.from_structure | 282 |
import tensorflow as tf
search_space = Box([0, 0], [1, 1])
num_initial_points = 3
initial_query_points = search_space.sample(num_initial_points)
initial_observations = objective(initial_query_points.numpy(), sleep=False)
initial_data = Dataset(
query_points=initial_query_points,
observations=tf.constant(initial_observations, dtype=tf.float64),
)
import gpflow
from trieste.models.gpflow import GaussianProcessRegression
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
kernel = gpflow.kernels.RBF(variance=variance)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr)
# these imports will be used later for optimization
from trieste.acquisition import LocalPenalizationAcquisitionFunction
from trieste.acquisition.rule import AsynchronousGreedy, EfficientGlobalOptimization
from trieste.ask_tell_optimization import AskTellOptimizer
# %% [markdown]
# ## Multiprocessing setup
#
| tensorflow.math.reduce_variance | 283 |
import tensorflow as tf
if not forward_only:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell, output_keep_prob=self.dropout_output)
# lstm_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell] * 4, state_is_tuple=True)
if not forward_only:
embed_inputs = tf.nn.dropout(embed_inputs, keep_prob=self.dropout_input)
rnn_outputs, output_states = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=embed_inputs,
dtype=tf.float32,
sequence_length=self.seq_len,
) ## (batch_size, seq_len, num_hidden)
| tensorflow.nn.dynamic_rnn | 284 |
import tensorflow as tf
if is_max_pool:
x = tf.nn.max_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name)
| tensorflow.nn.max_pool3d | 285 |
import tensorflow as tf
estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
x = tf.convert_to_tensor(x_color, "float32")
x_coori = tf.convert_to_tensor(x_coori, "float32")
def loop_analysis(element):
x = tf.expand_dims(element[0], 0)
x_coori = tf.expand_dims(element[1], 0)
y = analysis_transform(x_coori,x)
return tf.squeeze(y,axis=0)
element = [x,x_coori]
ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Analysis Transform")
def loop_hyper_encoder(y):
y = tf.expand_dims(y, 0)
z = hyper_encoder(y)
return tf.squeeze(z,axis=0)
zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Hyper Encoder")
z_hats, _ = entropy_bottleneck(zs, False)
print("Quantize hyperprior")
| tensorflow.map_fn | 286 |
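A small standalone example of tf.map_fn, the per-sample mapping pattern the snippet relies on; the mapped function here is just a stand-in:

import tensorflow as tf

def per_sample(x):
    # any per-example computation; here: L2-normalize one vector
    return x / tf.norm(x)

batch = tf.constant([[3.0, 4.0], [6.0, 8.0]])
normalized = tf.map_fn(per_sample, batch, dtype=tf.float32,
                       parallel_iterations=1, back_prop=False)
# each row of `normalized` has unit norm: [[0.6, 0.8], [0.6, 0.8]]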
import tensorflow as tf
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (512, 512))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def main(_):
train_examples = info.splits['train'].num_examples
batch_size = 8
steps_per_epoch = train_examples // batch_size
| tensorflow.image.resize | 287 |
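A tiny standalone example of tf.image.resize (TF 2.x); the target size is arbitrary:

import tensorflow as tf

image = tf.random.uniform([300, 400, 3])             # HWC, float in [0, 1)
resized = tf.image.resize(image, (128, 128))         # bilinear by default
print(resized.shape)                                 # (128, 128, 3)

batch = tf.random.uniform([8, 300, 400, 3])          # also works on NHWC batches
print(tf.image.resize(batch, (128, 128)).shape)      # (8, 128, 128, 3)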
from tensorflow.contrib.opt import ScipyOptimizerInterface
if type(method) is str:
success_msg = "SciPy optimizer completed successfully."
options = {'maxiter': maxiter, 'disp': True}
options.update(kw)
optimizer = ScipyOptimizerInterface(
loss, var_list=variables, method=method,
options=options
)
| tensorflow.contrib.opt.ScipyOptimizerInterface | 288 |
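A minimal, self-contained sketch of ScipyOptimizerInterface (TF 1.x) minimizing a toy quadratic; the choice of L-BFGS-B is only an example:

import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface

x = tf.Variable([3.0, -2.0])
loss = tf.reduce_sum(tf.square(x - tf.constant([1.0, 1.0])))

optimizer = ScipyOptimizerInterface(
    loss, var_list=[x], method='L-BFGS-B', options={'maxiter': 100})

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    optimizer.minimize(sess)   # runs SciPy, then assigns the result back to x
    print(sess.run(x))         # close to [1. 1.]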
import tensorflow as tf
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
# labels=tf.squeeze(annotation, squeeze_dims=[3]),
# name="entropy")))
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
mask2 = mask2_ - mask
pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss0 = tf.reduce_mean(tf.abs(z))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))
| tensorflow.pad | 289 |
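A small standalone example of tf.pad, the same zero-padding trick used to build the masks above; the sizes are made up:

import tensorflow as tf

ones = tf.ones([2, 2])
# paddings is [[before, after], ...] per dimension:
# one row/column of zeros on every side -> shape (4, 4)
padded = tf.pad(ones, [[1, 1], [1, 1]])
# [[0. 0. 0. 0.]
#  [0. 1. 1. 0.]
#  [0. 1. 1. 0.]
#  [0. 0. 0. 0.]]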
import tensorflow as tf
"""
Train RNN graph for multiple series
"""
def train_rnn_multi(raw_data_x, raw_data_y, val_data_x, val_data_y, timeindex_train, timeindex_val, g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):
with tf.Session() as sess:
"initialize the variables"
sess.run(tf.global_variables_initializer())
"see the trainable variables"
# print("The trainable variables are:")
variable_names = [v.name for v in tf.trainable_variables()]
variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
parameter_num = 0
for name, shape in zip(variable_names, variable_shapes):
# print('{}\nShape: {}'.format(name, shape))
parameter_num += shape[0]*shape[1] if np.size(shape)>1 else shape[0]
"train the graph"
training_losses = []
val_losses = []
        # set early-stopping criterion
checks_without_progress = 0
best_loss = np.infty
| tensorflow.trainable_variables | 290 |
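A compact standalone example of tf.trainable_variables (TF 1.x), counting parameters the same way the loop above does; the variable shapes are arbitrary:

import numpy as np
import tensorflow as tf

tf.reset_default_graph()
w = tf.get_variable("w", shape=[10, 5])
b = tf.get_variable("b", shape=[5])

total = sum(int(np.prod(v.get_shape().as_list())) for v in tf.trainable_variables())
print(total)   # 55
# tf.trainable_variables('some_scope') restricts the list to one variable scope.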
from tensorflow.python.framework import constant_op
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
| tensorflow.python.framework.constant_op.constant | 291 |
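constant_op.constant is the internal symbol behind the public tf.constant; a trivial sketch for completeness:

from tensorflow.python.framework import constant_op, dtypes

features = constant_op.constant([[1.0], [2.0], [2.0]])             # float32, shape (3, 1)
labels = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
# In user code the public alias tf.constant(...) is the usual spelling.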
import tensorflow as tf
#Q_filter_1 = tf.cast(qf1 > min_q,tf.float32)
#Q_filter_2 = tf.cast(qf2 > min_q,tf.float32)
im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph
#im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph
#actor_loss_di1 = tf.reduce_mean(im_loss1)
#actor_loss_di2 = tf.reduce_mean(im_loss2)
self.actor_loss_di = tf.reduce_mean(im_loss1)
imitation_for_priority = tf.reduce_mean(im_loss1,axis=1)
regularizerpi = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi")
all_trainable_weights_pi = tf.trainable_variables('model/pi')
regularization_penalty_pi = tf.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi)
policy_loss = policy_kl_loss + regularization_penalty_pi + self.actor_loss_di
# Target for value fn regression
| tensorflow.contrib.layers.l1_l2_regularizer | 292 |
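A minimal sketch of the l1_l2_regularizer / apply_regularization pair used above (TF 1.x); the scope name and scales are placeholders:

import tensorflow as tf

with tf.variable_scope("pi"):
    w = tf.get_variable("w", shape=[64, 64])

regularizer = tf.contrib.layers.l1_l2_regularizer(
    scale_l1=0.0, scale_l2=1e-5, scope="pi")
weights_to_penalize = tf.trainable_variables("pi")
# apply_regularization returns a scalar: the summed penalty over the given variables.
penalty = tf.contrib.layers.apply_regularization(regularizer, weights_to_penalize)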
import tensorflow as tf
t_flatten = tf.reshape(t, shape=(-1,))
uniques, index = tf.unique(t_flatten)
| tensorflow.unique | 293 |
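tf.unique returns the distinct values plus, for every input element, the index of its value in that distinct list; a quick example:

import tensorflow as tf

t = tf.constant([1, 1, 2, 4, 4, 4, 7])
uniques, index = tf.unique(t)
# uniques -> [1, 2, 4, 7]
# index   -> [0, 0, 1, 2, 2, 2, 3]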
from tensorflow.python.framework import op_def_registry
def _get_op_def(op):
# pylint: disable=protected-access
if hasattr(op, "_sig"):
return getattr(op, "_sig")
else:
return op_def_registry.get_registered_ops()[op.type]
# pylint: enable=protected-access
def _is_in_placeholders(op, func_arg_placeholders):
return op.values() and (op.values()[0].name in func_arg_placeholders)
| tensorflow.python.framework.op_def_registry.get_registered_ops | 294 |
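A tiny sketch of the (internal, TF 1.x) op_def_registry lookup used above; it maps op type names to their OpDef protos:

from tensorflow.python.framework import op_def_registry

registered = op_def_registry.get_registered_ops()     # dict: op name -> OpDef
matmul_def = registered["MatMul"]
print(matmul_def.name)                                 # "MatMul"
print([arg.name for arg in matmul_def.input_arg])      # ["a", "b"]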
import tensorflow as tf
### Metrics
global_step = tf.compat.v1.train.get_or_create_global_step()
orig_indices = tf.range(
self._sample_batch_size, dtype=relabel_indices.dtype)
with tf.name_scope("relabelling"):
# How often are the originally commanded goals most optimal?
opt_indices = tf.argmax(logits_vec, axis=1)
orig_is_opt = opt_indices == orig_indices
orig_opt_frac = tf.reduce_mean(tf.cast(orig_is_opt, tf.float32))
tf.compat.v2.summary.scalar(
name="orig_task_optimal", data=orig_opt_frac, step=global_step)
# How often is the relabelled goal optimal?
# The relabel_indices are [B, 1], so we need to remove the extra dim.
relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices
relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, tf.float32))
tf.compat.v2.summary.scalar(
name="relabel_task_optimal", data=relabel_opt_frac, step=global_step)
| tensorflow.compat.v2.summary.scalar | 295 |
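A small standalone sketch of tf.compat.v2.summary.scalar; scalars are recorded inside a summary writer's context, and the log directory here is a placeholder:

import tensorflow as tf

writer = tf.compat.v2.summary.create_file_writer("/tmp/relabel_logs")  # placeholder dir
with writer.as_default():
    for step in range(3):
        tf.compat.v2.summary.scalar(name="orig_task_optimal", data=0.42, step=step)
    writer.flush()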
import tensorflow as tf
weights=is_real_example)
return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
"MSE": mse, "eval_loss": loss,}
elif task_name == "cola":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for STS-B."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
predictions, label_ids, weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
predictions, label_ids, weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
predictions, label_ids, weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
predictions, label_ids, weights=is_real_example)
# Compute Matthew's correlation
mcc = tf.div_no_nan(
tp * tn - fp * fn,
tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))
# Compute accuracy
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
| tensorflow.metrics.false_negatives | 296 |
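The tf.metrics.* counters above all follow the same (value, update_op) pattern; a minimal standalone sketch for tf.metrics.false_negatives (TF 1.x):

import tensorflow as tf

labels = tf.constant([1, 1, 0, 1, 0])
predictions = tf.constant([1, 0, 0, 0, 1])

fn, fn_update = tf.metrics.false_negatives(labels=labels, predictions=predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())   # metric counters are local variables
    sess.run(fn_update)
    print(sess.run(fn))                          # 2.0: two positives predicted as 0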
import tensorflow as tf
def validation_mapper(byte):
image = tf.image.decode_jpeg(
tf.reshape(byte, shape=[]), 3, **JPEG_OPT)
image = resize_shortest_edge(image, tf.shape(image), 256)
image = center_crop(image, 224)
image = tf.reverse(image, axis=[2]) # to BGR
return image
def training_mapper(byte):
jpeg_shape = tf.image.extract_jpeg_shape(byte) # hwc
bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=tf.zeros(shape=[0, 0, 4]),
min_object_covered=0,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.08, 1.0],
max_attempts=10,
use_image_if_no_bounding_boxes=True)
| tensorflow.image.extract_jpeg_shape | 297 |
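tf.image.extract_jpeg_shape reads only the JPEG header, so the shape is available without decoding the pixels; a short sketch with a placeholder file path:

import tensorflow as tf

jpeg_bytes = tf.io.read_file("/path/to/image.jpg")     # placeholder path
shape = tf.image.extract_jpeg_shape(jpeg_bytes)         # [height, width, channels]
# Handy for sample_distorted_bounding_box, which needs only the shape, followed by
# a crop-and-decode of the selected window.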
import tensorflow as tf
for output in model_options.outputs_to_num_classes
}
for i, image_scale in enumerate(eval_scales):
with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None):
outputs_to_scales_to_logits = multi_scale_logits(
images,
model_options=model_options,
image_pyramid=[image_scale],
is_training=False,
fine_tune_batch_norm=False)
if add_flipped_images:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
outputs_to_scales_to_logits_reversed = multi_scale_logits(
tf.reverse_v2(images, [2]),
model_options=model_options,
image_pyramid=[image_scale],
is_training=False,
fine_tune_batch_norm=False)
for output in sorted(outputs_to_scales_to_logits):
scales_to_logits = outputs_to_scales_to_logits[output]
logits = tf.image.resize_bilinear(
scales_to_logits[_MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
align_corners=True)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits), 4))
| tensorflow.reverse_v2 | 298 |
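tf.reverse_v2 (the axis-list form of tf.reverse) flips a tensor along the listed axes; axis [2] mirrors NHWC images left-right, as in the flipped-inputs branch above:

import tensorflow as tf

# A dummy batch of images: (batch, height, width, channels)
images = tf.reshape(tf.range(12, dtype=tf.float32), [2, 2, 3, 1])
flipped = tf.reverse_v2(images, [2])   # reverse the width dimension
# tf.reverse(images, axis=[2]) is the equivalent spelling in newer APIs.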
import tensorflow as tf
lstm_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
| tensorflow.lite.experimental.nn.dynamic_rnn | 299 |
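Assuming the TF 1.x tf.lite.experimental.nn API (a TFLiteLSTMCell plus a dynamic_rnn that is time-major by default), a rough sketch of how a call like the truncated one above is typically completed; treat the exact signature as an assumption rather than a reference:

import tensorflow as tf

batch, time, features, num_units = 4, 10, 8, 16
inputs = tf.placeholder(tf.float32, [batch, time, features])
# The TFLite fused LSTM op expects time-major input.
lstm_input = tf.transpose(inputs, perm=[1, 0, 2])
cell = tf.lite.experimental.nn.TFLiteLSTMCell(num_units)
outputs, state = tf.lite.experimental.nn.dynamic_rnn(
    cell, lstm_input, dtype=tf.float32, time_major=True)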