Schema (each record below lists these fields in this order):

  repo_name    string, lengths 9-109
  hexsha       string, lengths 40-40
  code         string, lengths 547-141k
  apis         sequence
  file_path    string, lengths 6-143
  api_extract  string, lengths 142-58.4k
tadasdanielius/P5-Vehicle-Detection-And-Tracking
38513e91d863f7fff50703349aacbe5d5bbfae39
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from keras.models import model_from_json
from sklearn.preprocessing import normalize
import cv2
import numpy as np
import glob
import json

from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model

import tensorflow as tf


def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat(0, [shape[:1] // parts, shape[1:]])
        stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(merge(outputs, mode='concat', concat_axis=0))

        return Model(input=model.inputs, output=merged)


class CNNClassifier:
    def __init__(self):
        self.classifier = None

    def get_model(self, parallel=False):
        model = Sequential()
        #model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(64, 64, 3)))
        model.add(Convolution2D(8, 8, 8, subsample=(4, 4), border_mode="same", activation='elu', name='Conv1'))
        model.add(Convolution2D(16, 5, 5, subsample=(2, 2), border_mode="same", activation='elu', name='Conv2'))
        model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same", activation='elu', name='Conv3'))
        model.add(Flatten())
        model.add(ELU())
        model.add(Dense(1024, activation='elu'))
        model.add(Dropout(.5))
        model.add(ELU())
        model.add(Dense(512, activation='elu'))
        model.add(Dropout(.5))
        model.add(Dense(1, name='output'))
        model.add(Activation('sigmoid'))
        if parallel:
            model = make_parallel(model, 2)
        #model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
        self.model = model
        return model

    def _model(self):
        img_width, img_height = 64, 64
        model = Sequential()
        model.add(Convolution2D(8, 3, 3, input_shape=(img_width, img_height, 3)))
        model.add(Activation('elu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        #model.add(Convolution2D(16, 3, 3))
        #model.add(Activation('elu'))
        #model.add(MaxPooling2D(pool_size=(2, 2)))
        #model.add(Convolution2D(32, 3, 3))
        #model.add(Activation('elu'))
        #model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        #model = make_parallel(model, 2)
        self.model = model

    def compile(self):
        self.model.compile(loss='binary_crossentropy',
                           optimizer='rmsprop',
                           class_mode='binary',
                           metrics=['accuracy'])

    def save(self):
        model_json = self.model.to_json()
        with open("./model.json", "w") as json_file:
            json.dump(model_json, json_file)
        self.model.save_weights("./model.h5")
        print("Saved model to disk")

    def load(self):
        with open('./model.json', 'r') as jfile:
            self.model = model_from_json(json.load(jfile))
        self.compile()
        self.model.load_weights('./model.h5')

    def get_list(self):
        vehicles = np.array(glob.glob('training_data/vehicles/*/*'))
        y_vehicles = np.zeros(vehicles.shape) + 1
        non_vehicles = np.array(glob.glob('training_data/non-vehicles/*/*'))
        y_non_vehicles = np.zeros(non_vehicles.shape)
        X_data = np.concatenate((vehicles, non_vehicles))
        Y_data = np.concatenate((y_vehicles, y_non_vehicles))
        return X_data, Y_data

    def predict(self, image):
        #img = np.copy(image)
        #img = cv2.resize(img, (64, 64))
        x = image[None, :, :, :]
        result = self.model.predict(x, 1)
        return result

    def train(self, file_list, labels, test_size=0.2, nb_epoch=30, batch_size=128):
        X_train, X_test, Y_train, Y_test = train_test_split(file_list, labels,
                                                            test_size=test_size,
                                                            random_state=100)
        test_images = build_images(X_test)
        train_images = build_images(X_train)

        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.05,
            zoom_range=0.05,
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=5,
            horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow(train_images, Y_train, batch_size)
        test_generator = test_datagen.flow(test_images, Y_test, batch_size)

        nb_train_samples = (batch_size - 1) * 100
        nb_validation_samples = (batch_size - 1) * 20

        #self.get_model(parallel=False)
        self._model()
        self.compile()

        self.model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            validation_data=test_generator,
            nb_val_samples=nb_validation_samples)


def build_images(x):
    images = np.zeros((len(x), 64, 64, 3))
    for idx, img_fname in enumerate(x):
        im = cv2.imread(img_fname)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_AREA)
        images[idx] = im
    return images


def do_all(nb_epoch=30, batch_size=256):
    clf = CNNClassifier()
    x, y = clf.get_list()
    clf.train(x, y, nb_epoch=nb_epoch, batch_size=batch_size)
    clf.save()
[ "tensorflow.device", "tensorflow.concat", "tensorflow.shape", "tensorflow.slice", "sklearn.model_selection.train_test_split", "numpy.concatenate", "tensorflow.name_scope", "numpy.zeros" ]
sdc/detection/cnn_classifier.py
[(22, 'tensorflow.shape', 'tf.shape', (['data'], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.concat', 'tf.concat', (['(0)', '[shape[:1] // parts, shape[1:]]'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.concat', 'tf.concat', (['(0)', '[shape[:1] // parts, shape[1:] * 0]'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.slice', 'tf.slice', (['data', 'start', 'size'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (59, 'keras.models.Model', 'Model', ([], {'input': 'model.inputs', 'output': 'merged'}), False, 'from keras.models import Model\n'), (67, 'keras.models.Sequential', 'Sequential', ([], {}), False, 'from keras.models import Sequential\n'), (89, 'keras.models.Sequential', 'Sequential', ([], {}), False, 'from keras.models import Sequential\n'), (132, 'numpy.zeros', 'np.zeros', (['non_vehicles.shape'], {}), True, 'import numpy as np\n'), (133, 'numpy.concatenate', 'np.concatenate', (['(vehicles, non_vehicles)'], {}), True, 'import numpy as np\n'), (134, 'numpy.concatenate', 'np.concatenate', (['(y_vehicles, y_non_vehicles)'], {}), True, 'import numpy as np\n'), (145, 'sklearn.model_selection.train_test_split', 'train_test_split', (['file_list', 'labels'], {'test_size': 'test_size', 'random_state': '(100)'}), False, 'from sklearn.model_selection import train_test_split\n'), (150, 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.05)', 'zoom_range': '(0.05)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'rotation_range': '(5)', 'horizontal_flip': '(True)'}), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), (158, 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), (179, 'cv2.imread', 'cv2.imread', (['img_fname'], {}), False, 'import cv2\n'), (180, 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), False, 'import cv2\n'), (181, 'cv2.resize', 'cv2.resize', (['im', '(64, 64)'], {'interpolation': 'cv2.INTER_AREA'}), False, 'import cv2\n'), (34, 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), True, 'import tensorflow as tf\n'), (69, 'keras.layers.Convolution2D', 'Convolution2D', (['(8)', '(8)', '(8)'], {'subsample': '(4, 4)', 'border_mode': '"""same"""', 'activation': '"""elu"""', 'name': '"""Conv1"""'}), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), (70, 'keras.layers.Convolution2D', 'Convolution2D', (['(16)', '(5)', '(5)'], {'subsample': '(2, 2)', 'border_mode': '"""same"""', 'activation': '"""elu"""', 'name': '"""Conv2"""'}), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), (71, 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(5)', '(5)'], {'subsample': '(2, 2)', 'border_mode': '"""same"""', 'activation': '"""elu"""', 'name': '"""Conv3"""'}), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), (72, 'keras.layers.Flatten', 'Flatten', ([], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (73, 'keras.layers.ELU', 'ELU', ([], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (74, 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""elu"""'}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (75, 'keras.layers.Dropout', 
'Dropout', (['(0.5)'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (76, 'keras.layers.ELU', 'ELU', ([], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (77, 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""elu"""'}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (78, 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (79, 'keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""output"""'}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (80, 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (90, 'keras.layers.Convolution2D', 'Convolution2D', (['(8)', '(3)', '(3)'], {'input_shape': '(img_width, img_height, 3)'}), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), (91, 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (92, 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), (102, 'keras.layers.Flatten', 'Flatten', ([], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (103, 'keras.layers.Dense', 'Dense', (['(512)'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (104, 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (105, 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\n'), (117, 'json.dump', 'json.dump', (['model_json', 'json_file'], {}), False, 'import json\n'), (129, 'glob.glob', 'glob.glob', (['"""training_data/vehicles/*/*"""'], {}), False, 'import glob\n'), (130, 'numpy.zeros', 'np.zeros', (['vehicles.shape'], {}), True, 'import numpy as np\n'), (131, 'glob.glob', 'glob.glob', (['"""training_data/non-vehicles/*/*"""'], {}), False, 'import glob\n'), (35, 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%d' % i)"], {}), True, 'import tensorflow as tf\n'), (57, 'keras.layers.merge', 'merge', (['outputs'], {'mode': '"""concat"""', 'concat_axis': '(0)'}), False, 'from keras.layers import merge\n'), (123, 'json.load', 'json.load', (['jfile'], {}), False, 'import json\n'), (41, 'keras.layers.core.Lambda', 'Lambda', (['get_slice'], {'output_shape': 'input_shape', 'arguments': "{'idx': i, 'parts': gpu_count}"}), False, 'from keras.layers.core import Lambda\n')]
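This sample appears to target Keras 1.x and pre-1.0 TensorFlow (tf.concat takes the axis first, Convolution2D uses subsample/border_mode, and merge(..., mode='concat') is the old functional merge), so it will not run unchanged on modern releases. A hypothetical driver for the classifier, assuming the training_data/ directory layout the code itself globs over:

clf = CNNClassifier()
X_data, Y_data = clf.get_list()        # image paths plus 1/0 vehicle labels
clf.train(X_data, Y_data, nb_epoch=5)  # builds _model(), compiles, fits generators
clf.save()                             # writes ./model.json and ./model.h5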
LSanselme/kerod
cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902
# Copyright 2017 The TensorFlow Authors and modified by Emilien Garreau. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Method to subsample minibatches by balancing positives and negatives.

Subsamples minibatches based on a pre-specified positive fraction in range [0,1].
The class presumes there are many more negatives than positive examples: if the
desired sample_size cannot be achieved with the pre-specified positive fraction,
it fills the rest with negative examples. If this is not sufficient for obtaining
the desired sample_size, it returns fewer examples.

The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in the
minibatch_sampler base class.

When is_static is True, it implements a method that guarantees static shapes. It
also ensures the length of output of the subsample is always sample_size, even
when number of examples set to True in indicator is less than sample_size.
"""

import tensorflow as tf

from kerod.utils import ops


def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the function
    assigns all but `num_samples` of these previously `True` elements to `False`.
    If `num_samples` is greater than M, the original indicator vector is returned.

    Arguments:

    - *indicator*: a 1-dimensional boolean tensor indicating which elements
      are allowed to be sampled and which are not.
    - *num_samples*: int32 scalar tensor

    Returns:

    A boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random.shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)


def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):
    """Subsamples minibatches to a desired balance of positives and negatives.

    Arguments:

    - *indicator*: boolean tensor of shape [N] whose True entries can be sampled.
    - *sample_size*: desired batch size. If None, keeps all positive samples and
      randomly selects negative samples so that the positive sample fraction
      matches positive_fraction.
    - *labels*: boolean tensor of shape [N] denoting positive(=True) and negative
      (=False) examples.
    - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])
      in the batch.

    Returns:

    *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which
    are sampled.
    """
    negative_idx = tf.logical_not(labels)
    positive_idx = tf.logical_and(labels, indicator)
    negative_idx = tf.logical_and(negative_idx, indicator)

    # Sample positive and negative samples separately
    if sample_size is None:
        max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
    else:
        max_num_pos = int(positive_fraction * sample_size)
    sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)
    num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
    if sample_size is None:
        negative_positive_ratio = (1 - positive_fraction) / positive_fraction
        max_num_neg = tf.cast(negative_positive_ratio *
                              tf.cast(num_sampled_pos, dtype=tf.float32),
                              dtype=tf.int32)
    else:
        max_num_neg = sample_size - num_sampled_pos
    sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)

    return tf.logical_or(sampled_pos_idx, sampled_neg_idx)


def batch_sample_balanced_positive_negative(indicators,
                                            sample_size,
                                            labels,
                                            positive_fraction=0.5,
                                            dtype=tf.float32):
    """Subsamples minibatches to a desired balance of positives and negatives.

    Arguments:

    - *indicator*: boolean tensor of shape [batch_size, N] whose True entries
      can be sampled.
    - *sample_size*: desired batch size. If None, keeps all positive samples and
      randomly selects negative samples so that the positive sample fraction
      matches positive_fraction.
    - *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True)
      and negative (=False) examples.
    - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])
      in the batch.

    Returns:

    A boolean tensor of shape [M, N], True for entries which are sampled.
    """

    def _minibatch_subsample_fn(inputs):
        indicators, targets = inputs
        return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),
                                                 sample_size,
                                                 tf.cast(targets, tf.bool),
                                                 positive_fraction=positive_fraction)

    return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels],
                             dtype=tf.bool,
                             parallel_iterations=16,
                             back_prop=True),
                   dtype=dtype)
[ "tensorflow.shape", "tensorflow.logical_or", "tensorflow.reshape", "tensorflow.equal", "tensorflow.cast", "tensorflow.random.shuffle", "tensorflow.map_fn", "tensorflow.where", "tensorflow.logical_not", "tensorflow.size", "tensorflow.logical_and" ]
src/kerod/core/sampling_ops.py
[(55, 'tensorflow.where', 'tf.where', (['indicator'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.random.shuffle', 'tf.random.shuffle', (['indices'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.reshape', 'tf.reshape', (['indices', '[-1]'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.equal', 'tf.equal', (['selected_indicator', '(1)'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.logical_not', 'tf.logical_not', (['labels'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.logical_and', 'tf.logical_and', (['labels', 'indicator'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.logical_and', 'tf.logical_and', (['negative_idx', 'indicator'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.logical_or', 'tf.logical_or', (['sampled_pos_idx', 'sampled_neg_idx'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.size', 'tf.size', (['indices'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.reshape', 'tf.reshape', (['num_samples', '[1]'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.cast', 'tf.cast', (['sampled_pos_idx', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.map_fn', 'tf.map_fn', (['_minibatch_subsample_fn', '[indicators, labels]'], {'dtype': 'tf.bool', 'parallel_iterations': '(16)', 'back_prop': '(True)'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.shape', 'tf.shape', (['indicator'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.cast', 'tf.cast', (['positive_idx'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.cast', 'tf.cast', (['indicators', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.cast', 'tf.cast', (['targets', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.cast', 'tf.cast', (['num_sampled_pos'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n')]
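A minimal sketch of what sample_balanced_positive_negative returns, assuming TF 2.x eager execution and a kerod install (the helper ops.indices_to_dense_vector comes from kerod.utils):

import tensorflow as tf

labels = tf.constant([True, True, False, False, False, False])  # 2 positives, 4 negatives
indicator = tf.constant([True] * 6)                              # every entry may be sampled
# sample_size=4 with positive_fraction=0.5 should pick 2 positives and 2 negatives.
mask = sample_balanced_positive_negative(indicator, 4, labels, positive_fraction=0.5)
print(int(tf.reduce_sum(tf.cast(mask, tf.int32))))               # 4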
luoyi1hao/ACRN_Chest_X-ray_IA
b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a
from data import DataHandler
from models import ACRegNet
import tensorflow as tf
from utils import get_random_batch, read_config_file, create_dir


RUN_IN_GPU = False


def train_acregnet_model(config):
    tf.reset_default_graph()
    tf_config = tf.ConfigProto()

    if RUN_IN_GPU:
        tf_config.gpu_options.allow_growth = True

    sess = tf.Session(config=tf_config)

    train_ims, _ = DataHandler.load_images(config['train_ims_file'])
    train_lbs, _ = DataHandler.load_labels(config['train_lbs_file'])
    print('Loading training data...done')

    acregnet = ACRegNet(sess, config, 'ACRegNet', is_train=True)
    print('Building AC-RegNet model...done')

    print('Training...')
    for i in range(config['iterations']):
        batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y = get_random_batch(
            train_ims, config['batch_size'], train_lbs)
        cur_loss = acregnet.fit(
            batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y)
        print('Iteration {:>8d}/{}: Loss: {}'.format(
            i + 1, config['iterations'], cur_loss))

    acregnet.save(config['ckpt_dir'])
    print('Saving current AC-RegNet model...done')
    print('Training...done')

    tf.reset_default_graph()
    sess.close()


if __name__ == "__main__":
    config = read_config_file('./config/JSRT/ACRegNet.cfg')
    create_dir(config['ckpt_dir'])
    train_acregnet_model(config)
[ "tensorflow.ConfigProto", "tensorflow.reset_default_graph", "tensorflow.Session" ]
acregnet/train_acregnet.py
[(11, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), True, 'import tensorflow as tf\n'), (19, 'data.DataHandler.load_images', 'DataHandler.load_images', (["config['train_ims_file']"], {}), False, 'from data import DataHandler\n'), (20, 'data.DataHandler.load_labels', 'DataHandler.load_labels', (["config['train_lbs_file']"], {}), False, 'from data import DataHandler\n'), (23, 'models.ACRegNet', 'ACRegNet', (['sess', 'config', '"""ACRegNet"""'], {'is_train': '(True)'}), False, 'from models import ACRegNet\n'), (40, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (45, 'utils.read_config_file', 'read_config_file', (['"""./config/JSRT/ACRegNet.cfg"""'], {}), False, 'from utils import get_random_batch, read_config_file, create_dir\n'), (46, 'utils.create_dir', 'create_dir', (["config['ckpt_dir']"], {}), False, 'from utils import get_random_batch, read_config_file, create_dir\n'), (28, 'utils.get_random_batch', 'get_random_batch', (['train_ims', "config['batch_size']", 'train_lbs'], {}), False, 'from utils import get_random_batch, read_config_file, create_dir\n')]
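The trainer consumes five config keys: train_ims_file, train_lbs_file, iterations, batch_size, and ckpt_dir. A minimal sketch that drives train_acregnet_model with a hand-built dict instead of the .cfg file, assuming read_config_file normally yields a dict-like object; all paths here are hypothetical:

config = {
    'train_ims_file': './data/JSRT/train_images.txt',  # hypothetical path
    'train_lbs_file': './data/JSRT/train_labels.txt',  # hypothetical path
    'iterations': 10000,
    'batch_size': 32,
    'ckpt_dir': './checkpoints/JSRT',
}
train_acregnet_model(config)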
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers.sparse_feature_cross."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy

from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test


class SparseCrossOpTest(test.TestCase):

  def test_simple(self):
    """Tests a simple scenario."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([['batch1-FC1-F1'],
                             ['batch2-FC1-F1', 'batch2-FC1-F2']]),
        self._sparse_tensor([['batch1-FC2-F1'],
                             ['batch2-FC2-F1', 'batch2-FC2-F2']])
    ])
    expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
        'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
        'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_dense(self):
    """Tests only dense inputs."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
                              ['batch2-FC1-F1', 'batch2-FC1-F2']],
                             dtypes.string),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
                              ['batch2-FC2-F1', 'batch2-FC2-F2']],
                             dtypes.string),
    ])
    expected_out = self._sparse_tensor([[
        'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
        'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
    ], [
        'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
        'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_integer_mixed_string_sparse(self):
    """Tests mixed type."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([[11], [333, 55555]]),
        self._sparse_tensor([['batch1-FC2-F1'],
                             ['batch2-FC2-F1', 'batch2-FC2-F2']])
    ])
    expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
        '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
        '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_integer_mixed_string_dense(self):
    """Tests mixed dense inputs."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
                              ['batch2-FC2-F1', 'batch2-FC2-F2']],
                             dtypes.string),
    ])
    expected_out = self._sparse_tensor([[
        '11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
        '333_X_batch1-FC2-F2'
    ], [
        '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
        '999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_sparse_cross_dense(self):
    """Tests sparse and dense inputs."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([['batch1-FC1-F1'],
                             ['batch2-FC1-F1', 'batch2-FC1-F2']]),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
                              ['batch2-FC2-F1', 'batch2-FC2-F2']],
                             dtypes.string),
    ])
    expected_out = self._sparse_tensor(
        [['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
            'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
            'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
        ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_integer_sparse_input(self):
    """Tests mixed type sparse and dense inputs."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([[11], [333, 5555]]),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
                              ['batch2-FC2-F1', 'batch2-FC2-F2']],
                             dtypes.string),
    ])
    expected_out = self._sparse_tensor(
        [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
            '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
            '5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
        ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_permutation_3x3x3(self):
    """Tests 3x3x3 permutation."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor(
            [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
        self._sparse_tensor(
            [['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
        self._sparse_tensor(
            [['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
    ])
    expected_out = self._sparse_tensor([[
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
        'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
        'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
        'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
        'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
        'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
        'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
        'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
        'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
        'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
        'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
        'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
        'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
        'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
        'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
        'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
        'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
        'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
        'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
        'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_permutation_3x1x2(self):
    """Tests 3x1x2 permutation."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor(
            [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
        self._sparse_tensor([['batch1-FC2-F1']]),
        self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
    ])
    expected_out = self._sparse_tensor([[
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
    ]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_large_batch(self):
    """Tests with large batch size to force multithreading."""
    batch_size = 5000
    col1 = []
    col2 = []
    col3 = []
    for b in range(batch_size):
      col1.append(
          ['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
      col2.append(['batch%d-FC2-F1' % b])
      col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])

    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor(col1),
        self._sparse_tensor(col2),
        self._sparse_tensor(col3)
    ])

    col_out = []
    for b in range(batch_size):
      col_out.append([
          'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
          'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
          'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
          'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
          'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
          'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
      ])

    expected_out = self._sparse_tensor(col_out)
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_one_column_empty(self):
    """Tests when one column is empty.

    The crossed tensor should be empty.
    """
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
        self._sparse_tensor([], 1),
        self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
    ])
    with self.test_session() as sess:
      self._assert_sparse_tensor_empty(sess.run(op))

  def test_some_columns_empty(self):
    """Tests when more than one column is empty.

    Cross for the corresponding batch should be empty.
    """
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
        self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
        self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
    ])
    expected_out = self._sparse_tensor([[
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
        'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
    ]], 2)
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_all_columns_empty(self):
    """Tests when all columns are empty.

    The crossed tensor should be empty.
    """
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([]),
        self._sparse_tensor([]),
        self._sparse_tensor([])
    ])
    with self.test_session() as sess:
      self._assert_sparse_tensor_empty(sess.run(op))

  def test_hashed_output_zero_bucket(self):
    """Tests a simple scenario."""
    op = sparse_feature_cross_op.sparse_feature_cross(
        [
            self._sparse_tensor([['batch1-FC1-F1']]),
            self._sparse_tensor([['batch1-FC2-F1']]),
            self._sparse_tensor([['batch1-FC3-F1']])
        ],
        hashed_output=True)
    # Check actual hashed output to prevent unintentional hashing changes.
    expected_out = self._sparse_tensor([[3735511728867393167]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_hashed_output_zero_bucket_v2(self):
    """Tests a simple scenario."""
    op = sparse_feature_cross_op.sparse_feature_cross(
        [
            self._sparse_tensor([['batch1-FC1-F1']]),
            self._sparse_tensor([['batch1-FC2-F1']]),
            self._sparse_tensor([['batch1-FC3-F1']])
        ],
        hashed_output=True,
        hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    # Check actual hashed output to prevent unintentional hashing changes.
    expected_out = self._sparse_tensor([[1971693436396284976]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  # TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.

  def test_hashed_output(self):
    """Tests a simple scenario."""
    op = sparse_feature_cross_op.sparse_feature_cross(
        [
            self._sparse_tensor([['batch1-FC1-F1']]),
            self._sparse_tensor([['batch1-FC2-F1']]),
            self._sparse_tensor([['batch1-FC3-F1']])
        ],
        hashed_output=True,
        num_buckets=100)
    # Check actual hashed output to prevent unintentional hashing changes.
    expected_out = self._sparse_tensor([[74]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_hashed_output_v2(self):
    """Tests a simple scenario."""
    op = sparse_feature_cross_op.sparse_feature_cross(
        [
            self._sparse_tensor([['batch1-FC1-F1']]),
            self._sparse_tensor([['batch1-FC2-F1']]),
            self._sparse_tensor([['batch1-FC3-F1']])
        ],
        hashed_output=True,
        num_buckets=100,
        hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    # Check actual hashed output to prevent unintentional hashing changes.
    expected_out = self._sparse_tensor([[83]])
    with self.test_session() as sess:
      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_hashed_output_v1_has_collision(self):
    """Tests the old version of the fingerprint concatenation has collisions."""
    # The last 10 bits of 359 and 1024+359 are identical.
    # As a result, all the crosses collide.
    t1 = constant_op.constant([[359], [359 + 1024]])
    t2 = constant_op.constant([list(range(10)), list(range(10))])
    cross = sparse_feature_cross_op.sparse_feature_cross(
        [t2, t1], hashed_output=True, num_buckets=1024)
    cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
    with session.Session():
      values = cross_dense.eval()
      self.assertTrue(numpy.equal(values[0], values[1]).all())

  def test_hashed_output_v2_has_no_collision(self):
    """Tests the new version of the fingerprint concatenation has no collisions."""
    # Although the last 10 bits of 359 and 1024+359 are identical,
    # the crosses shouldn't collide.
    t1 = constant_op.constant([[359], [359 + 1024]])
    t2 = constant_op.constant([list(range(10)), list(range(10))])
    cross = sparse_feature_cross_op.sparse_feature_cross(
        [t2, t1],
        hashed_output=True,
        num_buckets=1024,
        hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
    with session.Session():
      values = cross_dense.eval()
      self.assertTrue(numpy.not_equal(values[0], values[1]).all())

  def test_hashed_3x1x2(self):
    """Tests 3x1x2 permutation with hashed output."""
    op = sparse_feature_cross_op.sparse_feature_cross(
        [
            self._sparse_tensor(
                [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
            self._sparse_tensor([['batch1-FC2-F1']]),
            self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
        ],
        hashed_output=True,
        num_buckets=1000)
    with self.test_session() as sess:
      out = sess.run(op)
      self.assertEqual(6, len(out.values))
      self.assertAllEqual([[0, i] for i in range(6)], out.indices)
      self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
      all_values_are_different = len(out.values) == len(set(out.values))
      self.assertTrue(all_values_are_different)

  def _assert_sparse_tensor_empty(self, sp):
    self.assertEquals(0, sp.indices.size)
    self.assertEquals(0, sp.values.size)
    # TODO(zakaria): check if we can ignore the first dim of the shape.
    self.assertEquals(0, sp.dense_shape[1])

  def _assert_sparse_tensor_equals(self, sp1, sp2):
    self.assertAllEqual(sp1.indices.eval(), sp2.indices)
    self.assertAllEqual(sp1.values.eval(), sp2.values)
    self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)

  def _sparse_tensor(self, data, batch_size=-1):
    """Generates a SparseTensor.

    Args:
      data: Should be a list of list of strings or int64. Each item of the
          outer list represents a batch. Each item of the batch is a feature
          of a specific feature column.
      batch_size: optional batch size, especially for cases when data has no
          entry for some batches.

    Returns:
      A SparseTensor.
    """
    indices = []
    values = []
    max_col_count = 0
    for batch, batch_ix in zip(data, range(len(data))):
      for column, column_ix in zip(batch, range(len(batch))):
        indices.append([batch_ix, column_ix])
        values.append(column)
        max_col_count = max(max_col_count, column_ix + 1)
    shape = [batch_size if batch_size != -1 else len(data), max_col_count]
    value_type = (dtypes.string if not values or isinstance(values[0], str)
                  else dtypes.int64)
    return sparse_tensor.SparseTensor(
        constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
        constant_op.constant(values, value_type, [len(indices)]),
        constant_op.constant(shape, dtypes.int64))


if __name__ == '__main__':
  test.main()
[ "tensorflow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross", "tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense", "tensorflow.python.platform.test.main", "tensorflow.python.client.session.Session", "numpy.equal", "numpy.not_equal", "tensorflow.python.framework.constant_op.constant" ]
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
[(437, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (349, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[359], [359 + 1024]]'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (351, 'tensorflow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross', 'sparse_feature_cross_op.sparse_feature_cross', (['[t2, t1]'], {'hashed_output': '(True)', 'num_buckets': '(1024)'}), False, 'from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op\n'), (353, 'tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense', 'sparse_ops.sparse_tensor_to_dense', (['cross'], {}), False, 'from tensorflow.python.ops import sparse_ops\n'), (363, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[359], [359 + 1024]]'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (365, 'tensorflow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross', 'sparse_feature_cross_op.sparse_feature_cross', (['[t2, t1]'], {'hashed_output': '(True)', 'num_buckets': '(1024)', 'hash_key': 'layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY'}), False, 'from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op\n'), (370, 'tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense', 'sparse_ops.sparse_tensor_to_dense', (['cross'], {}), False, 'from tensorflow.python.ops import sparse_ops\n'), (354, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (371, 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), False, 'from tensorflow.python.client import session\n'), (433, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['shape', 'dtypes.int64'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (55, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["[['batch1-FC1-F1', 'batch1-FC1-F2'], ['batch2-FC1-F1', 'batch2-FC1-F2']]", 'dtypes.string'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (58, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["[['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']]", 'dtypes.string'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (90, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[11, 333], [55555, 999999]]', 'dtypes.int64'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (91, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["[['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']]", 'dtypes.string'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (111, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["[['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']]", 'dtypes.string'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (127, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["[['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']]", 'dtypes.string'], {}), False, 'from tensorflow.python.framework import constant_op\n'), (356, 'numpy.equal', 'numpy.equal', (['values[0]', 'values[1]'], {}), False, 'import numpy\n'), (373, 'numpy.not_equal', 'numpy.not_equal', (['values[0]', 'values[1]'], 
{}), False, 'import numpy\n')]
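tf.contrib was removed in TF 2.x, so these tests no longer run as-is; the closest modern equivalent of the op under test appears to be tf.sparse.cross, which also joins crossed values with '_X_' by default. A rough TF 2.x sketch of the "simple scenario" above, assuming that equivalence:

import tensorflow as tf

fc1 = tf.ragged.constant([['batch1-FC1-F1'],
                          ['batch2-FC1-F1', 'batch2-FC1-F2']]).to_sparse()
fc2 = tf.ragged.constant([['batch1-FC2-F1'],
                          ['batch2-FC2-F1', 'batch2-FC2-F2']]).to_sparse()
crossed = tf.sparse.cross([fc1, fc2])
# Row 1 has 1x1 = 1 cross, row 2 has 2x2 = 4 crosses.
print(tf.sparse.to_dense(crossed, default_value=''))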
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network components for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops


class FullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected feed-forward neural network layer."""

  def _define_vars(self, params):
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, self.params.layer_size)

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations = layers.fully_connected(nn_activations,
                                                self.params.layer_size)
      return nn_activations


class ManyToOneLayer(hybrid_layer.HybridLayer):

  def _define_vars(self, params):
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, 1)

      # There is always one activation per instance by definition, so squeeze
      # away the extra dimension.
      return array_ops.squeeze(nn_activations, squeeze_dims=[1])


class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected flattened feed-forward neural network layer."""

  def _define_vars(self, params):
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))
      nn_activations_tensor = array_ops.concat(
          nn_activations, 1, name="flattened_nn_activations")

      return nn_activations_tensor
[ "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.contrib.layers.fully_connected", "tensorflow.python.framework.ops.device" ]
tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
[(35, 'tensorflow.python.framework.ops.device', 'ops.device', (['self.device_assigner'], {}), False, 'from tensorflow.python.framework import ops\n'), (37, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['data', 'self.params.layer_size'], {}), False, 'from tensorflow.contrib import layers\n'), (52, 'tensorflow.python.framework.ops.device', 'ops.device', (['self.device_assigner'], {}), False, 'from tensorflow.python.framework import ops\n'), (54, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['data', '(1)'], {}), False, 'from tensorflow.contrib import layers\n'), (58, 'tensorflow.python.ops.array_ops.squeeze', 'array_ops.squeeze', (['nn_activations'], {'squeeze_dims': '[1]'}), False, 'from tensorflow.python.ops import array_ops\n'), (68, 'tensorflow.python.framework.ops.device', 'ops.device', (['self.device_assigner'], {}), False, 'from tensorflow.python.framework import ops\n'), (79, 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['nn_activations', '(1)'], {'name': '"""flattened_nn_activations"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (41, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['nn_activations', 'self.params.layer_size'], {}), False, 'from tensorflow.contrib import layers\n'), (70, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['data', 'self.params.layer_size'], {}), False, 'from tensorflow.contrib import layers\n'), (75, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['nn_activations[-1]', 'self.params.layer_size'], {}), False, 'from tensorflow.contrib import layers\n')]
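The contrib call layers.fully_connected(x, n) is a dense layer with a default ReLU activation. A sketch of the FlattenedFullyConnectedLayer pattern (stack the layers, then concatenate every layer's activations) in present-day Keras, assuming that translation:

import tensorflow as tf

def flattened_fully_connected(x, layer_size=64, num_layers=3):
    # Mirror FlattenedFullyConnectedLayer: keep each layer's activations
    # and concatenate them along the feature axis.
    activations = [tf.keras.layers.Dense(layer_size, activation='relu')(x)]
    for _ in range(1, num_layers):
        activations.append(
            tf.keras.layers.Dense(layer_size, activation='relu')(activations[-1]))
    return tf.keras.layers.Concatenate(axis=1)(activations)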
calebchoo/modulabs
314d9cd9b607460f8bfea80fc828b1521ca18443
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip

import numpy
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import gfile

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                       (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot


def extract_labels(filename, one_hot=False, num_classes=10):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                       (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels, num_classes)
    return labels


class DataSet(object):

  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=True):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)
      ]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True):
  if fake_data:

    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                   SOURCE_URL + TRAIN_IMAGES)
  train_images = extract_images(local_file)

  local_file = base.maybe_download(TRAIN_LABELS, train_dir,
                                   SOURCE_URL + TRAIN_LABELS)
  train_labels = extract_labels(local_file, one_hot=one_hot)

  local_file = base.maybe_download(TEST_IMAGES, train_dir,
                                   SOURCE_URL + TEST_IMAGES)
  test_images = extract_images(local_file)

  local_file = base.maybe_download(TEST_LABELS, train_dir,
                                   SOURCE_URL + TEST_LABELS)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
  validation = DataSet(validation_images,
                       validation_labels,
                       dtype=dtype,
                       reshape=reshape)
  test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)

  return base.Datasets(train=train, validation=validation, test=test)


def load_mnist():
  return read_data_sets('MNIST_data')
[ "numpy.multiply", "numpy.arange", "tensorflow.contrib.learn.python.learn.datasets.base.Datasets", "numpy.dtype", "numpy.random.shuffle", "numpy.frombuffer", "tensorflow.python.framework.dtypes.as_dtype", "numpy.zeros", "tensorflow.python.platform.gfile.Open", "tensorflow.contrib.learn.python.learn.datasets.base.maybe_download" ]
tensorflow/contrib/learn/python/learn/datasets/mnist.py
[(60, 'numpy.zeros', 'numpy.zeros', (['(num_labels, num_classes)'], {}), False, 'import numpy\n'), (188, 'tensorflow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', (['TRAIN_IMAGES', 'train_dir', '(SOURCE_URL + TRAIN_IMAGES)'], {}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (192, 'tensorflow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', (['TRAIN_LABELS', 'train_dir', '(SOURCE_URL + TRAIN_LABELS)'], {}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (196, 'tensorflow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', (['TEST_IMAGES', 'train_dir', '(SOURCE_URL + TEST_IMAGES)'], {}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (200, 'tensorflow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', (['TEST_LABELS', 'train_dir', '(SOURCE_URL + TEST_LABELS)'], {}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (216, 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (42, 'tensorflow.python.platform.gfile.Open', 'gfile.Open', (['filename', '"""rb"""'], {}), False, 'from tensorflow.python.platform import gfile\n'), (42, 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'f'}), False, 'import gzip\n'), (51, 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), False, 'import numpy\n'), (59, 'numpy.arange', 'numpy.arange', (['num_labels'], {}), False, 'import numpy\n'), (68, 'tensorflow.python.platform.gfile.Open', 'gfile.Open', (['filename', '"""rb"""'], {}), False, 'from tensorflow.python.platform import gfile\n'), (68, 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'f'}), False, 'import gzip\n'), (75, 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), False, 'import numpy\n'), (180, 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), (35, 'numpy.dtype', 'numpy.dtype', (['numpy.uint32'], {}), False, 'import numpy\n'), (95, 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['dtype'], {}), False, 'from tensorflow.python.framework import dtypes\n'), (155, 'numpy.arange', 'numpy.arange', (['self._num_examples'], {}), False, 'import numpy\n'), (156, 'numpy.random.shuffle', 'numpy.random.shuffle', (['perm'], {}), False, 'import numpy\n'), (116, 'numpy.multiply', 'numpy.multiply', (['images', '(1.0 / 255.0)'], {}), False, 'import numpy\n'), (146, 'six.moves.xrange', 'xrange', (['batch_size'], {}), False, 'from six.moves import xrange\n'), (147, 'six.moves.xrange', 'xrange', (['batch_size'], {}), False, 'from six.moves import xrange\n')]
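Typical usage of the reader above, grounded in its own defaults (reshape=True flattens 28x28 images into 784-vectors, and the four IDX files are fetched from SOURCE_URL on first call):

mnist = read_data_sets('MNIST_data', one_hot=True)
images, labels = mnist.train.next_batch(128)  # shuffled minibatch
print(images.shape, labels.shape)             # (128, 784) (128, 10)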
darkxaze/PINNs
f344a907cf8b585e5f667465178c4442b907024d
""" @author: Maziar Raissi """ import sys #sys.path.insert(0, '../../Utilities/') sys.path.append('F:/PINNs-master/PINN/src') import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import scipy.io from scipy.interpolate import griddata import time from itertools import product, combinations from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection from plotting import newfig, savefig from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.gridspec as gridspec np.random.seed(1234) tf.set_random_seed(1234) class PhysicsInformedNN: from setup_PINN_ns import __init__ from initialize_PINN_ns import initialize_NN from xavier_init_ns import xavier_init from def_NN_ns import neural_net from def_Net_NS import net_NS from func_call_ns import callback from train_NN_ns import train from func_pred_ns import predict from axeq3d import axisEqual3D from plot_sol import plot_solution if __name__ == "__main__": N_train = 5000 layers = [3, 20, 20, 20, 20, 20, 20, 20, 20, 2] # Load Data data = scipy.io.loadmat('F:/PINNs-master/PINN/Data/cylinder_nektar_wake.mat') U_star = data['U_star'] # N x 2 x T P_star = data['p_star'] # N x T t_star = data['t'] # T x 1 X_star = data['X_star'] # N x 2 N = X_star.shape[0] T = t_star.shape[0] # Rearrange Data XX = np.tile(X_star[:,0:1], (1,T)) # N x T YY = np.tile(X_star[:,1:2], (1,T)) # N x T TT = np.tile(t_star, (1,N)).T # N x T UU = U_star[:,0,:] # N x T VV = U_star[:,1,:] # N x T PP = P_star # N x T x = XX.flatten()[:,None] # NT x 1 y = YY.flatten()[:,None] # NT x 1 t = TT.flatten()[:,None] # NT x 1 u = UU.flatten()[:,None] # NT x 1 v = VV.flatten()[:,None] # NT x 1 p = PP.flatten()[:,None] # NT x 1 ###################################################################### ######################## Noiseles Data ############################### ###################################################################### # Training Data idx = np.random.choice(N*T, N_train, replace=False) x_train = x[idx,:] y_train = y[idx,:] t_train = t[idx,:] u_train = u[idx,:] v_train = v[idx,:] # Training model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers) model.train(200000) # Test Data snap = np.array([100]) x_star = X_star[:,0:1] y_star = X_star[:,1:2] t_star = TT[:,snap] u_star = U_star[:,0,snap] v_star = U_star[:,1,snap] p_star = P_star[:,snap] # Prediction u_pred, v_pred, p_pred = model.predict(x_star, y_star, t_star) lambda_1_value = model.sess.run(model.lambda_1) lambda_2_value = model.sess.run(model.lambda_2) # Error error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) error_v = np.linalg.norm(v_star-v_pred,2)/np.linalg.norm(v_star,2) error_p = np.linalg.norm(p_star-p_pred,2)/np.linalg.norm(p_star,2) error_lambda_1 = np.abs(lambda_1_value - 1.0)*100 error_lambda_2 = np.abs(lambda_2_value - 0.01)/0.01 * 100 print('Error u: %e' % (error_u)) print('Error v: %e' % (error_v)) print('Error p: %e' % (error_p)) print('Error l1: %.5f%%' % (error_lambda_1)) print('Error l2: %.5f%%' % (error_lambda_2)) # Plot Results plot_solution(X_star, u_pred, 1) plot_solution(X_star, v_pred, 2) plot_solution(X_star, p_pred, 3) plot_solution(X_star, p_star, 4) plot_solution(X_star, p_star - p_pred, 5) # Predict for plotting lb = X_star.min(0) ub = X_star.max(0) nn = 200 x = np.linspace(lb[0], ub[0], nn) y = np.linspace(lb[1], ub[1], nn) X, Y = np.meshgrid(x,y) UU_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') VV_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic') 
PP_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic') P_exact = griddata(X_star, p_star.flatten(), (X, Y), method='cubic') ###################################################################### ########################### Noisy Data ############################### ###################################################################### noise = 0.01 u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1]) v_train = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1]) # Training model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers) model.train(200000) lambda_1_value_noisy = model.sess.run(model.lambda_1) lambda_2_value_noisy = model.sess.run(model.lambda_2) error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)*100 error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.01)/0.01 * 100 print('Error l1: %.5f%%' % (error_lambda_1_noisy)) print('Error l2: %.5f%%' % (error_lambda_2_noisy)) ###################################################################### ############################# Plotting ############################### ###################################################################### # Load Data data_vort = scipy.io.loadmat('../Data/cylinder_nektar_t0_vorticity.mat') x_vort = data_vort['x'] y_vort = data_vort['y'] w_vort = data_vort['w'] modes = np.asscalar(data_vort['modes']) nel = np.asscalar(data_vort['nel']) xx_vort = np.reshape(x_vort, (modes+1,modes+1,nel), order = 'F') yy_vort = np.reshape(y_vort, (modes+1,modes+1,nel), order = 'F') ww_vort = np.reshape(w_vort, (modes+1,modes+1,nel), order = 'F') box_lb = np.array([1.0, -2.0]) box_ub = np.array([8.0, 2.0]) fig, ax = newfig(1.0, 1.2) ax.axis('off') ####### Row 0: Vorticity ################## gs0 = gridspec.GridSpec(1, 2) gs0.update(top=1-0.06, bottom=1-2/4 + 0.12, left=0.0, right=1.0, wspace=0) ax = plt.subplot(gs0[:, :]) for i in range(0, nel): h = ax.pcolormesh(xx_vort[:,:,i], yy_vort[:,:,i], ww_vort[:,:,i], cmap='seismic',shading='gouraud', vmin=-3, vmax=3) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.plot([box_lb[0],box_lb[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1) ax.plot([box_ub[0],box_ub[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1) ax.plot([box_lb[0],box_ub[0]],[box_lb[1],box_lb[1]],'k',linewidth = 1) ax.plot([box_lb[0],box_ub[0]],[box_ub[1],box_ub[1]],'k',linewidth = 1) ax.set_aspect('equal', 'box') ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_title('Vorticity', fontsize = 10) ####### Row 1: Training data ################## ######## u(t,x,y) ################### gs1 = gridspec.GridSpec(1, 2) gs1.update(top=1-2/4, bottom=0.0, left=0.01, right=0.99, wspace=0) ax = plt.subplot(gs1[:, 0], projection='3d') ax.axis('off') r1 = [x_star.min(), x_star.max()] r2 = [data['t'].min(), data['t'].max()] r3 = [y_star.min(), y_star.max()] for s, e in combinations(np.array(list(product(r1,r2,r3))), 2): if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]: ax.plot3D(*zip(s,e), color="k", linewidth = 0.5) ax.scatter(x_train, t_train, y_train, s = 0.1) ax.contourf(X,UU_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8) ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$') ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$') ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$') ax.text(x_star.min()-3, data['t'].mean(), y_star.max() 
+ 1, '$u(t,x,y)$') ax.set_xlim3d(r1) ax.set_ylim3d(r2) ax.set_zlim3d(r3) axisEqual3D(ax) ######## v(t,x,y) ################### ax = plt.subplot(gs1[:, 1], projection='3d') ax.axis('off') r1 = [x_star.min(), x_star.max()] r2 = [data['t'].min(), data['t'].max()] r3 = [y_star.min(), y_star.max()] for s, e in combinations(np.array(list(product(r1,r2,r3))), 2): if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]: ax.plot3D(*zip(s,e), color="k", linewidth = 0.5) ax.scatter(x_train, t_train, y_train, s = 0.1) ax.contourf(X,VV_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8) ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$') ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$') ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$') ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$v(t,x,y)$') ax.set_xlim3d(r1) ax.set_ylim3d(r2) ax.set_zlim3d(r3) axisEqual3D(ax) # savefig('./figures/NavierStokes_data') fig, ax = newfig(1.015, 0.8) ax.axis('off') ######## Row 2: Pressure ####################### ######## Predicted p(t,x,y) ########### gs2 = gridspec.GridSpec(1, 2) gs2.update(top=1, bottom=1-1/2, left=0.1, right=0.9, wspace=0.5) ax = plt.subplot(gs2[:, 0]) h = ax.imshow(PP_star, interpolation='nearest', cmap='rainbow', extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_aspect('equal', 'box') ax.set_title('Predicted pressure', fontsize = 10) ######## Exact p(t,x,y) ########### ax = plt.subplot(gs2[:, 1]) h = ax.imshow(P_exact, interpolation='nearest', cmap='rainbow', extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_aspect('equal', 'box') ax.set_title('Exact pressure', fontsize = 10) ######## Row 3: Table ####################### gs3 = gridspec.GridSpec(1, 2) gs3.update(top=1-1/2, bottom=0.0, left=0.0, right=1.0, wspace=0) ax = plt.subplot(gs3[:, :]) ax.axis('off') s = r'$\begin{tabular}{|c|c|}'; s = s + r' \hline' s = s + r' Correct PDE & $\begin{array}{c}' s = s + r' u_t + (u u_x + v u_y) = -p_x + 0.01 (u_{xx} + u_{yy})\\' s = s + r' v_t + (u v_x + v v_y) = -p_y + 0.01 (v_{xx} + v_{yy})' s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' Identified PDE (clean data) & $\begin{array}{c}' s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value, lambda_2_value) s = s + r' \\' s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value, lambda_2_value) s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' Identified PDE (1\% noise) & $\begin{array}{c}' s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy) s = s + r' \\' s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy) s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' \end{tabular}$' ax.text(0.015,0.0,s) savefig('./figures/NavierStokes_prediction')
[ "numpy.asscalar", "numpy.abs", "numpy.random.seed", "numpy.random.choice", "numpy.linspace", "numpy.meshgrid", "numpy.reshape", "numpy.tile", "numpy.linalg.norm", "numpy.std", "matplotlib.pyplot.subplot", "numpy.random.randn", "matplotlib.gridspec.GridSpec", "tensorflow.set_random_seed", "numpy.array" ]
mycode/run_NavierStokes.py
[(7, 'sys.path.append', 'sys.path.append', (['"""F:/PINNs-master/PINN/src"""'], {}), False, 'import sys\n'), (21, 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), True, 'import numpy as np\n'), (22, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), True, 'import tensorflow as tf\n'), (54, 'numpy.tile', 'np.tile', (['X_star[:, 0:1]', '(1, T)'], {}), True, 'import numpy as np\n'), (55, 'numpy.tile', 'np.tile', (['X_star[:, 1:2]', '(1, T)'], {}), True, 'import numpy as np\n'), (74, 'numpy.random.choice', 'np.random.choice', (['(N * T)', 'N_train'], {'replace': '(False)'}), True, 'import numpy as np\n'), (86, 'numpy.array', 'np.array', (['[100]'], {}), True, 'import numpy as np\n'), (115, 'plot_sol.plot_solution', 'plot_solution', (['X_star', 'u_pred', '(1)'], {}), False, 'from plot_sol import plot_solution\n'), (116, 'plot_sol.plot_solution', 'plot_solution', (['X_star', 'v_pred', '(2)'], {}), False, 'from plot_sol import plot_solution\n'), (117, 'plot_sol.plot_solution', 'plot_solution', (['X_star', 'p_pred', '(3)'], {}), False, 'from plot_sol import plot_solution\n'), (118, 'plot_sol.plot_solution', 'plot_solution', (['X_star', 'p_star', '(4)'], {}), False, 'from plot_sol import plot_solution\n'), (119, 'plot_sol.plot_solution', 'plot_solution', (['X_star', '(p_star - p_pred)', '(5)'], {}), False, 'from plot_sol import plot_solution\n'), (125, 'numpy.linspace', 'np.linspace', (['lb[0]', 'ub[0]', 'nn'], {}), True, 'import numpy as np\n'), (126, 'numpy.linspace', 'np.linspace', (['lb[1]', 'ub[1]', 'nn'], {}), True, 'import numpy as np\n'), (127, 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), True, 'import numpy as np\n'), (166, 'numpy.asscalar', 'np.asscalar', (["data_vort['modes']"], {}), True, 'import numpy as np\n'), (167, 'numpy.asscalar', 'np.asscalar', (["data_vort['nel']"], {}), True, 'import numpy as np\n'), (169, 'numpy.reshape', 'np.reshape', (['x_vort', '(modes + 1, modes + 1, nel)'], {'order': '"""F"""'}), True, 'import numpy as np\n'), (170, 'numpy.reshape', 'np.reshape', (['y_vort', '(modes + 1, modes + 1, nel)'], {'order': '"""F"""'}), True, 'import numpy as np\n'), (171, 'numpy.reshape', 'np.reshape', (['w_vort', '(modes + 1, modes + 1, nel)'], {'order': '"""F"""'}), True, 'import numpy as np\n'), (173, 'numpy.array', 'np.array', (['[1.0, -2.0]'], {}), True, 'import numpy as np\n'), (174, 'numpy.array', 'np.array', (['[8.0, 2.0]'], {}), True, 'import numpy as np\n'), (176, 'plotting.newfig', 'newfig', (['(1.0)', '(1.2)'], {}), False, 'from plotting import newfig, savefig\n'), (180, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (182, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), True, 'import matplotlib.pyplot as plt\n'), (186, 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), (203, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (205, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[:, (0)]'], {'projection': '"""3d"""'}), True, 'import matplotlib.pyplot as plt\n'), (226, 'axeq3d.axisEqual3D', 'axisEqual3D', (['ax'], {}), False, 'from axeq3d import axisEqual3D\n'), (229, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[:, (1)]'], {'projection': '"""3d"""'}), True, 'import matplotlib.pyplot as plt\n'), (250, 'axeq3d.axisEqual3D', 'axisEqual3D', (['ax'], {}), False, 
'from axeq3d import axisEqual3D\n'), (255, 'plotting.newfig', 'newfig', (['(1.015)', '(0.8)'], {}), False, 'from plotting import newfig, savefig\n'), (260, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (262, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs2[:, (0)]'], {}), True, 'import matplotlib.pyplot as plt\n'), (266, 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), (276, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs2[:, (1)]'], {}), True, 'import matplotlib.pyplot as plt\n'), (280, 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), (291, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (293, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs3[:, :]'], {}), True, 'import matplotlib.pyplot as plt\n'), (319, 'plotting.savefig', 'savefig', (['"""./figures/NavierStokes_prediction"""'], {}), False, 'from plotting import newfig, savefig\n'), (56, 'numpy.tile', 'np.tile', (['t_star', '(1, N)'], {}), True, 'import numpy as np\n'), (101, 'numpy.linalg.norm', 'np.linalg.norm', (['(u_star - u_pred)', '(2)'], {}), True, 'import numpy as np\n'), (101, 'numpy.linalg.norm', 'np.linalg.norm', (['u_star', '(2)'], {}), True, 'import numpy as np\n'), (102, 'numpy.linalg.norm', 'np.linalg.norm', (['(v_star - v_pred)', '(2)'], {}), True, 'import numpy as np\n'), (102, 'numpy.linalg.norm', 'np.linalg.norm', (['v_star', '(2)'], {}), True, 'import numpy as np\n'), (103, 'numpy.linalg.norm', 'np.linalg.norm', (['(p_star - p_pred)', '(2)'], {}), True, 'import numpy as np\n'), (103, 'numpy.linalg.norm', 'np.linalg.norm', (['p_star', '(2)'], {}), True, 'import numpy as np\n'), (105, 'numpy.abs', 'np.abs', (['(lambda_1_value - 1.0)'], {}), True, 'import numpy as np\n'), (149, 'numpy.abs', 'np.abs', (['(lambda_1_value_noisy - 1.0)'], {}), True, 'import numpy as np\n'), (106, 'numpy.abs', 'np.abs', (['(lambda_2_value - 0.01)'], {}), True, 'import numpy as np\n'), (139, 'numpy.random.randn', 'np.random.randn', (['u_train.shape[0]', 'u_train.shape[1]'], {}), True, 'import numpy as np\n'), (140, 'numpy.random.randn', 'np.random.randn', (['v_train.shape[0]', 'v_train.shape[1]'], {}), True, 'import numpy as np\n'), (150, 'numpy.abs', 'np.abs', (['(lambda_2_value_noisy - 0.01)'], {}), True, 'import numpy as np\n'), (139, 'numpy.std', 'np.std', (['u_train'], {}), True, 'import numpy as np\n'), (140, 'numpy.std', 'np.std', (['v_train'], {}), True, 'import numpy as np\n'), (212, 'itertools.product', 'product', (['r1', 'r2', 'r3'], {}), False, 'from itertools import product, combinations\n'), (236, 'itertools.product', 'product', (['r1', 'r2', 'r3'], {}), False, 'from itertools import product, combinations\n'), (213, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n'), (213, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n'), (213, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n'), (237, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n'), (237, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n'), (237, 'numpy.abs', 'np.abs', (['(s - e)'], {}), True, 'import numpy as np\n')]
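The error metrics printed by the script above are relative L2 norms; a self-contained sketch of the same computation (the array shapes are illustrative assumptions, not taken from the record):

import numpy as np

def relative_l2(exact, pred):
    # ||exact - pred||_2 / ||exact||_2: error normalized by the field's norm.
    return np.linalg.norm(exact - pred, 2) / np.linalg.norm(exact, 2)

u_star = np.random.randn(5000, 1)                  # stand-in for the exact field
u_pred = u_star + 0.01 * np.random.randn(5000, 1)  # stand-in for the PINN output
print('Error u: %e' % relative_l2(u_star, u_pred))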
egonrian/google-research
2c0043ecd507e75e2df9973a3015daf9253e1467
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Implements data augmentations for cifar10/cifar100.""" from typing import Dict from absl import flags import tensorflow as tf from flax_models.cifar.datasets import auto_augment FLAGS = flags.FLAGS flags.DEFINE_integer('cutout_length', 16, 'Length (in pixels) of the cutout patch. Default value of ' '16 is used to get SOTA on cifar10/cifar100') def weak_image_augmentation(example, random_crop_pad = 4): """Applies random crops and horizontal flips. Simple data augmentations that are (almost) always used with cifar. Pad the image with `random_crop_pad` before randomly cropping it to its original size. Also randomly apply horizontal flip. Args: example: An example dict containing an image and a label. random_crop_pad: By how many pixels should the image be padded on each side before cropping. Returns: An example with the same label and an augmented version of the image. """ image, label = example['image'], example['label'] image = tf.image.random_flip_left_right(image) image_shape = tf.shape(image) image = tf.pad( image, [[random_crop_pad, random_crop_pad], [random_crop_pad, random_crop_pad], [0, 0]], mode='REFLECT') image = tf.image.random_crop(image, image_shape) return {'image': image, 'label': label} def auto_augmentation(example, dataset_name): """Applies the AutoAugment policy found for the dataset. AutoAugment: Learning Augmentation Policies from Data https://arxiv.org/abs/1805.09501 Args: example: An example dict containing an image and a label. dataset_name: Name of the dataset for which we should return the optimal policy. Returns: An example with the same label and an augmented version of the image. """ image, label = example['image'], example['label'] image = auto_augment.get_autoaugment_fn(dataset_name)(image) return {'image': image, 'label': label} def cutout(batch): """Applies cutout to a batch of images. The cut out patch will be replaced by zeros (thus the batch should be normalized before cutout is applied). Reference: Improved Regularization of Convolutional Neural Networks with Cutout https://arxiv.org/abs/1708.04552 Implementation inspired by: third_party/cloud_tpu/models/efficientnet/autoaugment.py Args: batch: A batch of images and labels. Returns: The same batch where cutout has been applied to the images. 
""" length, replace = FLAGS.cutout_length, 0.0 images, labels = batch['image'], batch['label'] num_channels = tf.shape(images)[3] image_height, image_width = tf.shape(images)[1], tf.shape(images)[2] cutout_center_height = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32) cutout_center_width = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32) lower_pad = tf.maximum(0, cutout_center_height - length // 2) upper_pad = tf.maximum(0, image_height - cutout_center_height - length // 2) left_pad = tf.maximum(0, cutout_center_width - length // 2) right_pad = tf.maximum(0, image_width - cutout_center_width - length // 2) cutout_shape = [image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad)] padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] mask = tf.pad( tf.zeros(cutout_shape, dtype=images.dtype), padding_dims, constant_values=1) patch = tf.ones_like(images, dtype=images.dtype) * replace, mask = tf.expand_dims(mask, -1) mask = tf.tile(mask, [1, 1, num_channels]) images = tf.where( tf.equal(mask, 0), patch, images) images = tf.squeeze(images, axis=0) return {'image': images, 'label': labels}
[ "tensorflow.image.random_flip_left_right", "tensorflow.shape", "tensorflow.zeros", "tensorflow.maximum", "tensorflow.random.uniform", "tensorflow.equal", "tensorflow.expand_dims", "tensorflow.squeeze", "tensorflow.ones_like", "tensorflow.image.random_crop", "tensorflow.pad", "tensorflow.tile" ]
flax_models/cifar/datasets/augmentation.py
[(28, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""cutout_length"""', '(16)', '"""Length (in pixels) of the cutout patch. Default value of 16 is used to get SOTA on cifar10/cifar100"""'], {}), False, 'from absl import flags\n'), (50, 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.pad', 'tf.pad', (['image', '[[random_crop_pad, random_crop_pad], [random_crop_pad, random_crop_pad], [0, 0]\n ]'], {'mode': '"""REFLECT"""'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', 'image_shape'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_height', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_width', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_height - length // 2)'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_height - cutout_center_height - length // 2)'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_width - length // 2)'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_width - cutout_center_width - length // 2)'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(-1)'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.tile', 'tf.tile', (['mask', '[1, 1, num_channels]'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.squeeze', 'tf.squeeze', (['images'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (76, 'flax_models.cifar.datasets.auto_augment.get_autoaugment_fn', 'auto_augment.get_autoaugment_fn', (['dataset_name'], {}), False, 'from flax_models.cifar.datasets import auto_augment\n'), (101, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.zeros', 'tf.zeros', (['cutout_shape'], {'dtype': 'images.dtype'}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.equal', 'tf.equal', (['mask', '(0)'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.ones_like', 'tf.ones_like', (['images'], {'dtype': 'images.dtype'}), True, 'import tensorflow as tf\n')]
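A hypothetical tf.data wiring for the three augmentations above (the pipeline function and its defaults are assumptions, not part of the record; cutout is mapped after batching because it draws one patch location per call and expects a rank-4 batch):

import tensorflow as tf

def make_pipeline(ds, dataset_name='cifar10', batch_size=128):
  # Per-example augmentations first.
  ds = ds.map(weak_image_augmentation,
              num_parallel_calls=tf.data.experimental.AUTOTUNE)
  ds = ds.map(lambda example: auto_augmentation(example, dataset_name))
  # Batch, then cut a patch out of the (normalized) batch.
  ds = ds.batch(batch_size)
  ds = ds.map(cutout)
  return ds.prefetch(tf.data.experimental.AUTOTUNE)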
muchemwal/models
49fd0a8a61b0e5dab196014bf47de7f62d97c884
import os import io import time import base64 import functools from PIL import Image import numpy as np import tensorflow as tf import tensorflow_hub as hub from helpers import * os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True" class PythonPredictor: def __init__(self, config): # Import TF-Hub module self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1") def predict(self, payload): # Preprocess image hr_image = preprocess_image(payload["image_b64"]) # Run model fake_image = self.hub_module(hr_image) # convert to base64 img = get_image(tf.squeeze(fake_image)) im_file = io.BytesIO() img.save(im_file, format="PNG") im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8") return im_bytes
[ "tensorflow.squeeze" ]
tensorflow/super_resolution/syndicai.py
[(20, 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/captain-pool/esrgan-tf2/1"""'], {}), True, 'import tensorflow_hub as hub\n'), (31, 'io.BytesIO', 'io.BytesIO', ([], {}), False, 'import io\n'), (30, 'tensorflow.squeeze', 'tf.squeeze', (['fake_image'], {}), True, 'import tensorflow as tf\n')]
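A hypothetical client-side round trip for the predictor above (file names and config contents are assumptions; preprocess_image and get_image come from the record's helpers import):

import base64

with open('input.png', 'rb') as f:  # assumed input file
    payload = {'image_b64': base64.b64encode(f.read()).decode('utf-8')}

predictor = PythonPredictor(config={})
out_b64 = predictor.predict(payload)

with open('output.png', 'wb') as f:  # upscaled result, decoded from base64
    f.write(base64.b64decode(out_b64))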
ai-nikolai/Retrograph-1
54bd534d47218ca437c422a1abe5b1e995f55d71
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from retrograph.modeling import modeling_adapter as modeling from retrograph.modeling import optimization_adapter as optimization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = 
tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = tf.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, keep_checkpoint_max=20, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.nn.log_softmax", "tensorflow.FixedLenFeature", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.train.init_from_checkpoint", "tensorflow.contrib.data.parallel_interleave", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.gather", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.gfile.Glob", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.nn.bias_add", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.reshape", "tensorflow.variable_scope" ]
training_utility/run_pretraining_adapter.py
[(84, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (310, 'retrograph.modeling.modeling_adapter.get_shape_list', 'modeling.get_shape_list', (['sequence_tensor'], {'expected_rank': '(3)'}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (317, 'tensorflow.reshape', 'tf.reshape', (['(positions + flat_offsets)', '[-1]'], {}), True, 'import tensorflow as tf\n'), (318, 'tensorflow.reshape', 'tf.reshape', (['sequence_tensor', '[batch_size * seq_length, width]'], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.gather', 'tf.gather', (['flat_sequence_tensor', 'flat_positions'], {}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (407, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (412, 'retrograph.modeling.modeling_adapter.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (414, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Input Files ***"""'], {}), True, 'import tensorflow as tf\n'), (452, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size'}), True, 'import tensorflow as tf\n'), (494, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (131, 'retrograph.modeling.modeling_adapter.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (150, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""cls/predictions"""'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.reshape', 'tf.reshape', (['label_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.reshape', 'tf.reshape', (['label_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.one_hot', 'tf.one_hot', (['label_ids'], {'depth': 'bert_config.vocab_size', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(label_weights * per_example_loss)'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/seq_relationship"""'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': '(2)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (422, 'tensorflow.logging.info', 'tf.logging.info', (["(' %s' % input_file)"], {}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (461, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'), (471, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'), (482, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'), (119, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (156, 'retrograph.modeling.modeling_adapter.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (172, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (177, 'retrograph.modeling.optimization_adapter.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 
'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), True, 'from retrograph.modeling import optimization_adapter as optimization\n'), (180, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transform"""'], {}), True, 'import tensorflow as tf\n'), (255, 'retrograph.modeling.modeling_adapter.layer_norm', 'modeling.layer_norm', (['input_tensor'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (277, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_probs * one_hot_labels)'], {'axis': '[-1]'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label_weights'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_files'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.gfile.Glob', 'tf.gfile.Glob', (['input_pattern'], {}), True, 'import tensorflow as tf\n'), (436, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (483, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (484, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (294, 'retrograph.modeling.modeling_adapter.create_initializer', 'modeling.create_initializer', 
(['bert_config.initializer_range'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (296, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.constant', 'tf.constant', (['input_files'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.contrib.data.parallel_interleave', 'tf.contrib.data.parallel_interleave', (['tf.data.TFRecordDataset'], {'sloppy': 'is_training', 'cycle_length': 'cycle_length'}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_log_probs', '[-1, masked_lm_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.argmax', 'tf.argmax', (['masked_lm_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_example_loss', '[-1]'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'masked_lm_ids', 'predictions': 'masked_lm_predictions', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'masked_lm_example_loss', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_log_probs', '[-1, next_sentence_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.argmax', 'tf.argmax', (['next_sentence_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'next_sentence_labels', 'predictions': 'next_sentence_predictions'}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'next_sentence_example_loss'}), True, 'import tensorflow as tf\n'), (252, 'retrograph.modeling.modeling_adapter.get_activation', 'modeling.get_activation', (['bert_config.hidden_act'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n'), (253, 'retrograph.modeling.modeling_adapter.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}), True, 'from retrograph.modeling import modeling_adapter as modeling\n')]
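To make the index arithmetic in gather_indexes above concrete, a NumPy analogue of the same flattening (the shapes here are illustrative assumptions):

import numpy as np

batch_size, seq_length, width = 2, 4, 3
sequence = np.arange(batch_size * seq_length * width).reshape(
    batch_size, seq_length, width)
positions = np.array([[0, 2], [1, 3]])  # masked positions per example

flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
flat_positions = (positions + flat_offsets).reshape(-1)  # [0, 2, 5, 7]
flat_sequence = sequence.reshape(batch_size * seq_length, width)
gathered = flat_sequence[flat_positions]  # shape: (total_predictions, width)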
pizzahan/lingvo
9b85b7ba5d037701302efa807841c05223bc7d1d
# -*- coding: utf-8 -*- # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Encode using wordpiece models. Implements the segmentation algorithm described in the last paragraph of p. 5150, in the following publication: M. Schuster and K. Nakajima, "Japanese and Korean voice search," 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, 2012 https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import tensorflow as tf from lingvo.core.ops import py_x_ops # Must be a large ID. NO_TOKEN = 1 << 31 - 1 NO_TOKEN_STRING = '<unk>' SENTENCE_START_STRING = '<s>' SENTENCE_END_STRING = '</s>' BOW_STR = '▁' class WpmEncoder(object): def __init__(self, wpm_filepath, merge_prob=1.): """Create a WPM encoder. Args: wpm_filepath: a path to the file containing the vocabulary. merge_prob: the probability of merging tokens while encoding. """ # Load vocabulary file. self._pieces = [] with tf.gfile.Open(wpm_filepath, 'r') as f: for line in f.readlines(): line = line.decode('utf-8') piece = line.strip().split('\t')[0] self._pieces.append(piece) self._merge_prob = merge_prob def _TokenToString(self, token): return py_x_ops.vocab_id_to_token(token, vocab=self._pieces) def _StringToToken(self, tokstr): return tf.where( py_x_ops.token_in_vocab(tokstr, vocab=self._pieces), py_x_ops.vocab_token_to_id(tokstr, vocab=self._pieces), tf.broadcast_to(NO_TOKEN, tf.shape(tokstr))) def _MergeTokens(self, tokens): return self._StringToToken( self._TokenToString(tokens[0]) + self._TokenToString(tokens[1])) def _EncodeToIds(self, word): # Below: # * a token is a wordpiece ID. # * the tokens array will be merged in-place. # * the candidates array is an array of size len(tokens) - 1. # It contains the token for the merged wordpiece, if it exists, # -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]). # First, split into basic UTF-8 characters (letters). chars = tf.strings.unicode_split(word, 'UTF-8') tokens = self._StringToToken(chars) tokens = tf.where( tf.equal(tokens, NO_TOKEN), # Unseen character. tf.broadcast_to(self.unk_id, tf.shape(tokens)), tokens) # Create initial candidate list. candidates = tf.map_fn( self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype) def _ShouldMerge(unused_tokens, candidates): """Merge until not possible, or we abort early according to merge_prob.""" return tf.logical_and( tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)), tf.random.uniform([]) < self._merge_prob) def _MergeOneToken(tokens, i): return tf.expand_dims( self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1) def _MergeCandidates(tokens, candidates): """Merge in the reverse binary tree.""" best_id = tf.argmin(candidates, output_type=tf.int32) # Perform the merge at position best_id. 
tokens = tf.concat( [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]], axis=0) # Recompute the merge candidates. # Only the neighbors of best_id need to be recomputed. empty = tf.zeros([0], dtype=candidates.dtype) def _MergeLeft(): return tf.concat( [candidates[:best_id - 1], _MergeOneToken(tokens, best_id - 1)], axis=0) left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft) def _MergeRight(): return tf.concat( [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0) right_candidates = tf.cond( tf.greater_equal(best_id, tf.size(tokens) - 1), lambda: empty, _MergeRight) candidates = tf.concat([left_candidates, right_candidates], axis=0) return tokens, candidates return tf.while_loop( _ShouldMerge, _MergeCandidates, (tokens, candidates), parallel_iterations=1, back_prop=False)[0] def Encode(self, text): """Converts string `text` to integer ids and the encoded string. Encoding includes prefixing the beginning-of-word token to each word. Returns: ids: the encoded integer ids. tokens: the encoded string. """ words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0] num_words = tf.size(words) ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True) def _WordsToIds(i, words, ids_ta): encoded_ids = self._EncodeToIds(BOW_STR + words[i]) ids_ta = ids_ta.scatter( tf.range(ids_ta.size(), ids_ta.size() + tf.size(encoded_ids)), encoded_ids) return i + 1, words, ids_ta _, _, ids_ta = tf.while_loop( lambda i, *_: i < num_words, _WordsToIds, loop_vars=(tf.constant(0, tf.int32), words, ids_ta), parallel_iterations=30, back_prop=False) ids = ids_ta.stack() return ids, self._TokenToString(ids) def Decode(self, ids): txt = tf.strings.reduce_join(self._TokenToString(ids)) txt = tf.strings.regex_replace(txt, BOW_STR, ' ') # Note that this strips spaces from the end of the input as well. # We assume no inputs rely on the existence of trailing whitespace. txt = tf.strings.strip(txt) return txt @property def sentence_start_id(self): return self._pieces.index(SENTENCE_START_STRING) @property def sentence_start_string(self): return SENTENCE_START_STRING @property def sentence_end_id(self): return self._pieces.index(SENTENCE_END_STRING) @property def sentence_end_string(self): return SENTENCE_END_STRING @property def unk_id(self): return self._pieces.index(NO_TOKEN_STRING)
[ "tensorflow.strings.regex_replace", "tensorflow.not_equal", "tensorflow.strings.unicode_split", "tensorflow.concat", "tensorflow.while_loop", "tensorflow.gfile.Open", "tensorflow.TensorArray", "tensorflow.zeros", "tensorflow.shape", "tensorflow.equal", "tensorflow.strings.split", "tensorflow.random.uniform", "tensorflow.constant", "tensorflow.map_fn", "tensorflow.strings.strip", "tensorflow.size", "tensorflow.argmin" ]
lingvo/core/wpm_encoder.py
[(67, 'lingvo.core.ops.py_x_ops.vocab_id_to_token', 'py_x_ops.vocab_id_to_token', (['token'], {'vocab': 'self._pieces'}), False, 'from lingvo.core.ops import py_x_ops\n'), (87, 'tensorflow.strings.unicode_split', 'tf.strings.unicode_split', (['word', '"""UTF-8"""'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.map_fn', 'tf.map_fn', (['self._MergeTokens', '(tokens[:-1], tokens[1:])'], {'dtype': 'tokens.dtype'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.size', 'tf.size', (['words'], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.int32', '(0)'], {'dynamic_size': '(True)'}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.strings.regex_replace', 'tf.strings.regex_replace', (['txt', 'BOW_STR', '""" """'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.strings.strip', 'tf.strings.strip', (['txt'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['wpm_filepath', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (71, 'lingvo.core.ops.py_x_ops.token_in_vocab', 'py_x_ops.token_in_vocab', (['tokstr'], {'vocab': 'self._pieces'}), False, 'from lingvo.core.ops import py_x_ops\n'), (72, 'lingvo.core.ops.py_x_ops.vocab_token_to_id', 'py_x_ops.vocab_token_to_id', (['tokstr'], {'vocab': 'self._pieces'}), False, 'from lingvo.core.ops import py_x_ops\n'), (90, 'tensorflow.equal', 'tf.equal', (['tokens', 'NO_TOKEN'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.argmin', 'tf.argmin', (['candidates'], {'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.concat', 'tf.concat', (['[tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.zeros', 'tf.zeros', (['[0]'], {'dtype': 'candidates.dtype'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.concat', 'tf.concat', (['[left_candidates, right_candidates]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.while_loop', 'tf.while_loop', (['_ShouldMerge', '_MergeCandidates', '(tokens, candidates)'], {'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.shape', 'tf.shape', (['tokstr'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.shape', 'tf.shape', (['tokens'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.equal', 'tf.equal', (['best_id', '(0)'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.strings.split', 'tf.strings.split', (['[text]'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.not_equal', 'tf.not_equal', (['candidates', 'NO_TOKEN'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.size', 'tf.size', (['tokens'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.size', 'tf.size', (['encoded_ids'], {}), True, 'import tensorflow as tf\n')]
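The wpm_encoder.py record above expresses the Schuster–Nakajima wordpiece segmentation with TensorFlow control flow (a tf.while_loop over merge candidates). The same greedy pair-merging idea can be illustrated in plain Python — a minimal sketch under simplified assumptions, not lingvo's implementation: the vocab is a plain set, merge_prob sampling and unknown-character handling are omitted, and the merge position is chosen by list order rather than tf.argmin over candidate ids.

# Minimal pure-Python sketch of the greedy merge loop in wpm_encoder.py.
# Assumptions: `vocab` is a set of known pieces; merge_prob sampling and
# <unk> handling from the record are omitted.
def encode_word(word, vocab, bow_str='▁'):
    tokens = list(bow_str + word)  # split to characters, begin-of-word marked
    while True:
        # Candidate merges: adjacent pairs whose concatenation is in vocab.
        candidates = [(i, tokens[i] + tokens[i + 1])
                      for i in range(len(tokens) - 1)
                      if tokens[i] + tokens[i + 1] in vocab]
        if not candidates:
            return tokens
        i, merged = candidates[0]   # the record picks tf.argmin; any deterministic rule works here
        tokens[i:i + 2] = [merged]  # merge in place, like _MergeCandidates

vocab = {'▁h', '▁he', 'll', '▁hell', '▁hello'}
print(encode_word('hello', vocab))  # ['▁hello']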
pizzahan/lingvo
9b85b7ba5d037701302efa807841c05223bc7d1d
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Lingvo MT layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow as tf from lingvo.core import base_layer from lingvo.core import layers from lingvo.core import layers_with_attention class TransformerStack(base_layer.BaseLayer): """Stacked self- multi-head attention and fully connected layers. With optional layer normalization applied to the final output. See 'Attention Is All You Need' https://arxiv.org/abs/1706.03762 for details. """ @classmethod def Params(cls): """Configs for TransformerStack.""" p = super(TransformerStack, cls).Params() # Transformer related p.Define('model_dim', 1024, 'Characteristic depth (dimension).') p.Define('num_transformer_layers', 6, 'Number of transformer layers.') p.Define('transformer_tpl', layers_with_attention.TransformerLayer.Params(), 'TransformerLayer params tpl.') p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params') p.Define('ln_output', False, 'If set, layer normalization is applied to the final output' ' of the encoder transformer stack.') p.Define('is_transparent', False, 'If set, outputs a merger of embeddings and layer outputs.') p.Define('num_transparent_outputs', 6, 'Number of transparent outputs.') p.Define( 'transparent_merger_tpl', layers.WeightedSumLayer.Params().Set(add_weight_summaries=True), 'Merger op for layer outputs.') p.Define('packed_input', False, 'If True, assumes multiple training samples per input.') p.Define('has_aux_attention', False, 'Allows encoder layers to attend auxiliary inputs.') p.transformer_tpl.tr_atten_tpl.num_attention_heads = 8 p.transformer_tpl.tr_fflayer_tpl.hidden_dim = 8192 return p @base_layer.initializer def __init__(self, params): super(TransformerStack, self).__init__(params) p = self.params with tf.variable_scope(p.name): # Add transformer layers. transformer_layer_params = [] for i in range(p.num_transformer_layers): params = p.transformer_tpl.Copy() params.name = 'trans_%d' % (i) params.source_dim = p.model_dim params.packed_input = p.packed_input params.has_aux_atten = p.has_aux_attention transformer_layer_params.append(params) self.CreateChildren('trans', transformer_layer_params) # Initialize TransformerStack output layer norm if p.ln_output: params = p.ln_tpl.Copy() # Keeping historic 'enc_out_ln' name for checkpoint compatibility. 
params.name = 'enc_out_ln' params.input_dim = p.model_dim self.CreateChild('layer_norm_out', params) if p.is_transparent: transparent_params = [] if not p.num_transparent_outputs: raise ValueError('num_transparent_outputs should be greater than 0.') for i in range(p.num_transparent_outputs): transparent_param = p.transparent_merger_tpl.Copy() transparent_param.name = 'transparent_%d' % i transparent_param.num_sources = 1 + p.num_transformer_layers transparent_params.append(transparent_param) self.CreateChildren('transparent_merger', transparent_params) def FProp(self, theta, transformer_input, paddings, src_segment_id=None, aux_vecs=None, aux_paddings=None, aux_segment_id=None): """Transforms source sequence of Tensors with Transformers layers. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. transformer_input: A sequence of input Tensors of [time, batch, dim] shape. paddings: A sequence of 0s and 1s indicating input paddings of [time, batch] shape. src_segment_id: A sequence of ints indicating segment ids of [time, batch] shape. aux_vecs: A sequence of input Tensors of [aux_time, batch, dim] shape, as context for the cross-attention layer. aux_paddings: A sequence of 0s and 1s indicating input paddings of [aux_time, batch] shape. aux_segment_id: A sequence of ints indicating segment ids of [aux_time, batch] shape. Returns: (outputs, out_paddings, segment_ids) tuple. `outputs` is of the shape [time, batch, depth], and `out_paddings` has shape [time, batch]. If is_transparent is True, can return a list of num_transformer_layers tensors of shape [time, batch, depth] if `p.is_eval` is False, and a [time, batch, depth, num_transparent_outputs] tensor if `p.is_eval` is True. If packed_input is True, also returns segment_id, otherwise returns None. """ p = self.params if p.packed_input: assert src_segment_id is not None, ('Need to specify src_segment_id if ' 'packed input is supported.') outputs_list = [transformer_input] with tf.name_scope(p.name): for i, transformer_l in enumerate(self.trans): # For encoder, keys, values and queries are the same transformer_output, _ = transformer_l.FProp( theta.trans[i], transformer_input, paddings, aux_vecs=aux_vecs, aux_paddings=aux_paddings, source_segment_id=src_segment_id, aux_segment_id=aux_segment_id) transformer_input = transformer_output outputs_list.append(transformer_output) if p.ln_output: transformer_output = self.layer_norm_out.FProp(theta.layer_norm_out, transformer_output) # When is_transparent is set, it outputs a list of tensors during # training and the stacked tensors otherwise. This dual behavior is meant # to avoid excessive memory usage during training (which was prohibiting # training on TPUs), and simplify the beam search interface. if p.is_transparent: if p.num_transparent_outputs == 1: transformer_output = self.transparent_merger[0].FProp( theta.transparent_merger[0], outputs_list) else: transformer_output = [] for i in range(p.num_transparent_outputs): merged_outputs = self.transparent_merger[i].FProp( theta.transparent_merger[i], outputs_list) transformer_output.append(merged_outputs) if p.is_eval: transformer_output = tf.stack(transformer_output, 3) return transformer_output, paddings, src_segment_id
[ "tensorflow.variable_scope", "tensorflow.stack", "tensorflow.name_scope" ]
lingvo/tasks/mt/layers.py
[(45, 'lingvo.core.layers_with_attention.TransformerLayer.Params', 'layers_with_attention.TransformerLayer.Params', ([], {}), False, 'from lingvo.core import layers_with_attention\n'), (48, 'lingvo.core.layers.LayerNorm.Params', 'layers.LayerNorm.Params', ([], {}), False, 'from lingvo.core import layers\n'), (73, 'tensorflow.variable_scope', 'tf.variable_scope', (['p.name'], {}), True, 'import tensorflow as tf\n'), (76, 'six.moves.range', 'range', (['p.num_transformer_layers'], {}), False, 'from six.moves import range\n'), (145, 'tensorflow.name_scope', 'tf.name_scope', (['p.name'], {}), True, 'import tensorflow as tf\n'), (98, 'six.moves.range', 'range', (['p.num_transparent_outputs'], {}), False, 'from six.moves import range\n'), (58, 'lingvo.core.layers.WeightedSumLayer.Params', 'layers.WeightedSumLayer.Params', ([], {}), False, 'from lingvo.core import layers\n'), (174, 'six.moves.range', 'range', (['p.num_transparent_outputs'], {}), False, 'from six.moves import range\n'), (179, 'tensorflow.stack', 'tf.stack', (['transformer_output', '(3)'], {}), True, 'import tensorflow as tf\n')]
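In the TransformerStack record, `is_transparent` routes every intermediate output (plus the input embedding) into `WeightedSumLayer` mergers. A minimal sketch of such a learned weighted sum, written in the TF1 style the records use — the variable names, shapes, and the softmax normalization of the weights are illustrative assumptions, not lingvo's exact WeightedSumLayer:

import tensorflow as tf

# Sketch of a "transparent" merge over per-layer encoder outputs.
# outputs_list mirrors TransformerStack.FProp: embedding + one tensor per layer.
num_layers = 6
outputs_list = [tf.random_normal([10, 4, 1024])          # [time, batch, dim]
                for _ in range(num_layers + 1)]

w = tf.get_variable('merge_weights', shape=[num_layers + 1])
probs = tf.nn.softmax(w)                                 # one weight per source
stacked = tf.stack(outputs_list, axis=-1)                # [time, batch, dim, n]
merged = tf.reduce_sum(stacked * probs, axis=-1)         # [time, batch, dim]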
MuAuan/cheating_DL
e8c543d83c304ca072b479cf34fe0a07b58ec6e3
#grad_cam #[keras-grad-cam/grad-cam.py](https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py) from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions) from keras.models import Model from keras.preprocessing import image from keras.layers.core import Lambda from keras.models import Sequential from tensorflow.python.framework import ops import keras.backend as K import tensorflow as tf import numpy as np import keras import sys import cv2 #from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions #from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions #from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions def target_category_loss(x, category_index, nb_classes): return tf.multiply(x, K.one_hot([category_index], nb_classes)) def target_category_loss_output_shape(input_shape): return input_shape def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) def load_image(path): img_path = sys.argv[1] img = image.load_img(img_path, target_size=(224,224)) #299,299)) #224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return x def register_gradient(): if "GuidedBackProp" not in ops._gradient_registry._registry: @ops.RegisterGradient("GuidedBackProp") def _GuidedBackProp(op, grad): dtype = op.inputs[0].dtype return grad * tf.cast(grad > 0., dtype) * \ tf.cast(op.inputs[0] > 0., dtype) def compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98 input_img = model.input layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) #print(layer_dict) layer_output = layer_dict[activation_layer].output max_output = K.max(layer_output, axis=3) saliency = K.gradients(K.sum(max_output), input_img)[0] return K.function([input_img, K.learning_phase()], [saliency]) def modify_backprop(model, name): g = tf.get_default_graph() with g.gradient_override_map({'Relu': name}): # get layers that have an activation layer_dict = [layer for layer in model.layers[1:] if hasattr(layer, 'activation')] # replace relu activation for layer in layer_dict: if layer.activation == keras.activations.relu: layer.activation = tf.nn.relu # re-instanciate a new model new_model = VGG16(weights='imagenet') #new_model = ResNet50(weights='imagenet') new_model.summary() return new_model def deprocess_image(x): ''' Same normalization as in: https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py ''' if np.ndim(x) > 3: x = np.squeeze(x) # normalize tensor: center on 0., ensure std is 0.1 x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 # clip to [0, 1] x += 0.5 x = np.clip(x, 0, 1) # convert to RGB array x *= 255 if K.image_dim_ordering() == 'th': x = x.transpose((1, 2, 0)) x = np.clip(x, 0, 255).astype('uint8') return x def _compute_gradients(tensor, var_list): grads = tf.gradients(tensor, var_list) return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)] def grad_cam(input_model, image, category_index, layer_name): nb_classes = 1000 target_layer = lambda x: target_category_loss(x, category_index, nb_classes) x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output) model = Model(inputs=input_model.input, outputs=x) #model.summary() loss = K.sum(model.output) conv_output = [l for l in model.layers if l.name == layer_name][0].output #is 
grads = normalize(_compute_gradients(loss, [conv_output])[0]) gradient_function = K.function([model.input], [conv_output, grads]) output, grads_val = gradient_function([image]) output, grads_val = output[0, :], grads_val[0, :, :, :] weights = np.mean(grads_val, axis = (0, 1)) cam = np.ones(output.shape[0 : 2], dtype = np.float32) for i, w in enumerate(weights): cam += w * output[:, :, i] cam = cv2.resize(cam, (224,224)) #299,299)) #224, 224)) cam = np.maximum(cam, 0) heatmap = cam / np.max(cam) #Return to BGR [0..255] from the preprocessed image image = image[0, :] image -= np.min(image) image = np.minimum(image, 255) cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET) cam = np.float32(cam) + np.float32(image) cam = 255 * cam / np.max(cam) return np.uint8(cam), heatmap preprocessed_input = load_image(sys.argv[1]) model = VGG16(weights='imagenet') #model = VGG19(weights='imagenet') #model = InceptionV3(weights='imagenet') #model = ResNet50(weights = 'imagenet') #model.summary() target_layer = 'block5_conv3' #'activation_49' add_16 "block5_conv3" predictions = model.predict(preprocessed_input) register_gradient() guided_model = modify_backprop(model, 'GuidedBackProp') guided_model.summary() for i in range(5): top_1 = decode_predictions(predictions)[0][i] print(predictions.argsort()[0][::-1][i]) print('Predicted class:') print('%s (%s) with probability %.2f' % (top_1[1], top_1[0], top_1[2])) predicted_class = predictions.argsort()[0][::-1][i] #np.argmax(predictions) cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, target_layer) cv2.imwrite("gradcam"+str(top_1[1])+".jpg", cam) saliency_fn = compile_saliency_function(guided_model) saliency = saliency_fn([preprocessed_input, 0]) gradcam = saliency[0] * heatmap[..., np.newaxis] cv2.imwrite("guided_gradcam"+str(top_1[1])+".jpg", deprocess_image(gradcam))
[ "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "tensorflow.python.framework.ops.RegisterGradient", "numpy.clip", "numpy.min", "numpy.uint8", "numpy.squeeze", "tensorflow.cast", "tensorflow.gradients", "numpy.ones", "numpy.ndim", "numpy.max", "tensorflow.zeros_like", "numpy.mean", "numpy.float32", "tensorflow.get_default_graph" ]
grad-cam_5category.py
[(136, 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""'}), False, 'from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\n'), (34, 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), True, 'import numpy as np\n'), (35, 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), False, 'from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\n'), (51, 'keras.backend.max', 'K.max', (['layer_output'], {'axis': '(3)'}), True, 'import keras.backend as K\n'), (56, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (88, 'numpy.clip', 'np.clip', (['x', '(0)', '(1)'], {}), True, 'import numpy as np\n'), (98, 'tensorflow.gradients', 'tf.gradients', (['tensor', 'var_list'], {}), True, 'import tensorflow as tf\n'), (105, 'keras.models.Model', 'Model', ([], {'inputs': 'input_model.input', 'outputs': 'x'}), False, 'from keras.models import Model\n'), (107, 'keras.backend.sum', 'K.sum', (['model.output'], {}), True, 'import keras.backend as K\n'), (110, 'keras.backend.function', 'K.function', (['[model.input]', '[conv_output, grads]'], {}), True, 'import keras.backend as K\n'), (115, 'numpy.mean', 'np.mean', (['grads_val'], {'axis': '(0, 1)'}), True, 'import numpy as np\n'), (116, 'numpy.ones', 'np.ones', (['output.shape[0:2]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (121, 'cv2.resize', 'cv2.resize', (['cam', '(224, 224)'], {}), False, 'import cv2\n'), (122, 'numpy.maximum', 'np.maximum', (['cam', '(0)'], {}), True, 'import numpy as np\n'), (127, 'numpy.min', 'np.min', (['image'], {}), True, 'import numpy as np\n'), (128, 'numpy.minimum', 'np.minimum', (['image', '(255)'], {}), True, 'import numpy as np\n'), (21, 'keras.backend.one_hot', 'K.one_hot', (['[category_index]', 'nb_classes'], {}), True, 'import keras.backend as K\n'), (40, 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""GuidedBackProp"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (69, 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""'}), False, 'from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\n'), (79, 'numpy.ndim', 'np.ndim', (['x'], {}), True, 'import numpy as np\n'), (80, 'numpy.squeeze', 'np.squeeze', (['x'], {}), True, 'import numpy as np\n'), (92, 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), True, 'import keras.backend as K\n'), (104, 'keras.layers.core.Lambda', 'Lambda', (['target_layer'], {'output_shape': 'target_category_loss_output_shape'}), False, 'from keras.layers.core import Lambda\n'), (123, 'numpy.max', 'np.max', (['cam'], {}), True, 'import numpy as np\n'), (130, 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), True, 'import numpy as np\n'), (131, 'numpy.float32', 'np.float32', (['cam'], {}), True, 'import numpy as np\n'), (131, 'numpy.float32', 'np.float32', (['image'], {}), True, 'import numpy as np\n'), (132, 'numpy.max', 'np.max', (['cam'], {}), True, 'import numpy as np\n'), (133, 'numpy.uint8', 'np.uint8', (['cam'], {}), True, 'import numpy as np\n'), (52, 'keras.backend.sum', 'K.sum', (['max_output'], {}), True, 'import keras.backend as K\n'), (53, 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), True, 'import keras.backend as K\n'), (94, 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), True, 'import numpy as np\n'), (99, 'tensorflow.zeros_like', 'tf.zeros_like', (['var'], {}), True, 
'import tensorflow as tf\n'), (148, 'keras.applications.vgg16.decode_predictions', 'decode_predictions', (['predictions'], {}), False, 'from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\n'), (44, 'tensorflow.cast', 'tf.cast', (['(op.inputs[0] > 0.0)', 'dtype'], {}), True, 'import tensorflow as tf\n'), (28, 'keras.backend.square', 'K.square', (['x'], {}), True, 'import keras.backend as K\n'), (43, 'tensorflow.cast', 'tf.cast', (['(grad > 0.0)', 'dtype'], {}), True, 'import tensorflow as tf\n')]
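The grad-cam record follows the usual Grad-CAM recipe: global-average-pool the class-score gradients over the spatial axes to get one weight per channel, then take a ReLU-clipped weighted sum of the feature maps (the record seeds `cam` with `np.ones`, a small bias apparently inherited from the upstream keras-grad-cam script). Stripped of the Keras plumbing, the core arithmetic reduces to a few NumPy lines on random stand-in data with illustrative shapes:

import numpy as np

# Core Grad-CAM arithmetic from grad_cam() above, on dummy data.
# feature_maps plays the role of the block5_conv3 activations, grads the
# gradient of the class score w.r.t. those activations.
h, w, channels = 14, 14, 512
feature_maps = np.random.rand(h, w, channels).astype(np.float32)
grads = np.random.randn(h, w, channels).astype(np.float32)

weights = grads.mean(axis=(0, 1))                    # GAP over space: one alpha_k per channel
cam = np.einsum('hwc,c->hw', feature_maps, weights)  # sum_k alpha_k * A^k
cam = np.maximum(cam, 0)                             # ReLU, as in the record
cam /= cam.max() + 1e-8                              # normalize to [0, 1]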
xuyuandong/sequence_behavior_ctr_model
e1bb71b4579456b1c6fbf3b432a84a3cb52611b7
import tensorflow as tf #from tensorflow.python.ops.rnn_cell import * #from tensorflow.python.ops.rnn_cell_impl import _Linear from tensorflow.contrib.rnn.python.ops.core_rnn_cell import * #from tensorflow import keras from tensorflow.python.ops import math_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import variable_scope as vs #from keras import backend as K def din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) print ("query_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all if mask is not None: mask = tf.equal(mask, tf.ones_like(mask)) key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output class VecAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. 
""" def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(VecAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) u = (1.0 - att_score) * u new_h = u * state + (1 - u) * c return new_h, new_h def prelu(_x, scope=''): """parametric ReLU activation""" with tf.variable_scope(name_or_scope=scope, default_name="prelu"): _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1], dtype=_x.dtype, initializer=tf.constant_initializer(0.1)) return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x) def calc_auc(raw_arr): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ arr = sorted(raw_arr, key=lambda d:d[0], reverse=True) pos, neg = 0., 0. for record in arr: if record[1] == 1.: pos += 1 else: neg += 1 fp, tp = 0., 0. xy_arr = [] for record in arr: if record[1] == 1.: tp += 1 else: fp += 1 xy_arr.append([fp/neg, tp/pos]) auc = 0. prev_x = 0. prev_y = 0. for x, y in xy_arr: if x != prev_x: auc += ((x - prev_x) * (y + prev_y) / 2.) prev_x = x prev_y = y return auc def calc_gauc(raw_arr, nick_index): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ last_index = 0 gauc = 0. pv_sum = 0 for idx in xrange(len(nick_index)): if nick_index[idx] != nick_index[last_index]: input_arr = raw_arr[last_index:idx] auc_val=calc_auc(input_arr) if auc_val >= 0.0: gauc += auc_val * len(input_arr) pv_sum += len(input_arr) else: pv_sum += len(input_arr) last_index = idx return gauc / pv_sum def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = tf.concat(facts, 2) if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask if mask is not None: # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output def self_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) def cond(batch, output, i): return tf.less(i, tf.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :], ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2]) return self_attention def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) def cond(batch, output, i): return tf.less(i, tf.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch, ATTENTION_SIZE, mask, softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2]) return self_attention def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the 
forward and the backward RNN outputs. facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag) d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts)) output = d_layer_2_all return output
[ "tensorflow.concat", "tensorflow.python.ops.array_ops.split", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.tanh", "tensorflow.where", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.while_loop", "tensorflow.layers.dense", "tensorflow.name_scope", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.tensordot", "tensorflow.matmul", "tensorflow.shape", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.maximum", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.array_ops.transpose", "tensorflow.random_normal" ]
script/utils.py
[(29, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', '(80)'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_att' + stag)"}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', '(40)'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_att' + stag)"}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_2_all', '(1)'], {'activation': 'None', 'name': "('f3_att' + stag)"}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.tensordot', 'tf.tensordot', (['tmp', 'v'], {'axes': '(1)', 'name': '"""v_dot_tmp"""'}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.where', 'tf.where', (['key_masks', 'v_dot_tmp', 'paddings'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['v_dot_tmp'], {'name': '"""alphas"""'}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.layers.dense', 'tf.layers.dense', (['query', 'facts_size'], {'activation': 'None', 'name': "('f1' + stag)"}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', '(80)'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_att' + stag)"}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', '(40)'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_att' + stag)"}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_2_all', '(1)'], {'activation': 'None', 'name': "('f3_att' + stag)"}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[facts, output_ta, 0]'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow.transpose', 'tf.transpose', (['self_attention'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[facts, output_ta, 0]'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.transpose', 'tf.transpose', (['self_attention'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.layers.dense', 'tf.layers.dense', (['query', 'facts_size'], {'activation': 'None', 'name': "('f1_trans_shine' + stag)"}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', 'facts_size'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_shine_att' + stag)"}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', 'facts_size'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_shine_att' + stag)"}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.concat', 'tf.concat', ([], {'values': '[query, query]', 'axis': '(1)'}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow 
as tf\n'), (38, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(1)'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.where', 'tf.where', (['key_masks', 'scores', 'paddings'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scores'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.matmul', 'tf.matmul', (['scores', 'facts'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.python.ops.array_ops.split', 'array_ops.split', ([], {'value': 'value', 'num_or_size_splits': '(2)', 'axis': '(1)'}), False, 'from tensorflow.python.ops import array_ops\n'), (130, 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': 'scope', 'default_name': '"""prelu"""'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_size, attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.random_normal', 'tf.random_normal', (['[input_size, attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.random_normal', 'tf.random_normal', (['[attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.random_normal', 'tf.random_normal', (['[attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.name_scope', 'tf.name_scope', (['"""v"""'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.tensordot', 'tf.tensordot', (['facts', 'w1'], {'axes': '(1)'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.tensordot', 'tf.tensordot', (['query', 'w2'], {'axes': '(1)'}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.tanh', 'tf.tanh', (['(tmp1 + tmp2 + b)'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.ones_like', 'tf.ones_like', (['v_dot_tmp'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.expand_dims', 'tf.expand_dims', (['alphas', '(-1)'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(1)'], {}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scores'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.matmul', 'tf.matmul', (['scores', 'facts'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self_attention_tmp', '(1)'], {}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['self_attention_tmp', '(1)'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (359, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.ones_like', 'tf.ones_like', (['scores'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.expand_dims', 'tf.expand_dims', (['scores', '(-1)'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '_x'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.ones_like', 'tf.ones_like', (['scores'], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.where', 'tf.where', (['key_masks', 'scores', 'paddings'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.expand_dims', 'tf.expand_dims', (['scores', '(-1)'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(1.0)'], {'dtype': 'inputs.dtype'}), False, 'from tensorflow.python.ops import init_ops\n'), (103, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""gates"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (116, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""candidate"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (132, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.minimum', 'tf.minimum', (['(0.0)', '_x'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.shape', 'tf.shape', (['batch'], {}), True, 'import tensorflow as tf\n'), (325, 'tensorflow.shape', 'tf.shape', (['batch'], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.shape', 'tf.shape', (['tmp2'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n')]
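All the attention variants in the utils.py record share one scoring path: tile the query across time, feed the concatenation [q, k, q-k, q*k] through small dense layers to get a score per step, mask padded steps with a large negative value, softmax, then take a weighted sum over the facts. A NumPy sketch of that data flow — a single random projection stands in for the learned f1_att/f2_att/f3_att stack, and all shapes are illustrative:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

# Shapes follow din_attention above: query [B, D], facts [B, T, D].
B, T, D = 2, 5, 8
query = np.random.randn(B, D).astype(np.float32)
facts = np.random.randn(B, T, D).astype(np.float32)
mask = np.array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=bool)   # valid steps

q = np.broadcast_to(query[:, None, :], facts.shape)             # tile over T
din_all = np.concatenate([q, facts, q - facts, q * facts], axis=-1)  # [B, T, 4D]

# Stand-in for the f1_att/f2_att/f3_att dense stack: one random projection.
w = np.random.randn(4 * D).astype(np.float32)
scores = din_all @ w                                             # [B, T]
scores = np.where(mask, scores, -2.0 ** 32 + 1)                  # pad like the record
alphas = softmax(scores, axis=1)
output = (alphas[:, :, None] * facts).sum(axis=1)                # [B, D] weighted sum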