repo_name (stringlengths 8–75) | hexsha (stringlengths 40) | code (stringlengths 463–167k) | file_path (stringlengths 7–127) | api_extract (stringlengths 127–51.5k)
---|---|---|---|---|
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense
import arrayblow.v1.compt.keras as keras
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class LSTMSeq2Seq(BaseModel):
def __init__(self, check_optional_config=True, future_seq_len=2):
"""
Constructor of LSTM Seq2Seq model
"""
self.model = None
self.past_seq_len = None
self.future_seq_len = future_seq_len
self.feature_num = None
self.target_col_num = None
self.metric = None
self.latent_dim = None
self.batch_size = None
self.check_optional_config = check_optional_config
def _build_train(self, mc=False, **config):
"""
build LSTM Seq2Seq model
:param config:
:return:
"""
super()._check_config(**config)
self.metric = config.get('metric', 'mean_squared_error')
self.latent_dim = config.get('latent_dim', 128)
self.dropout = config.get('dropout', 0.2)
self.lr = config.get('lr', 0.001)
# for restore in continuous training
self.batch_size = config.get('batch_size', 64)
training = True if mc else None
# Define an input sequence and process it.
self.encoder_inputs = Input(shape=(None, self.feature_num), name="encoder_inputs")
encoder = LSTM(units=self.latent_dim,
dropout=self.dropout,
return_state=True,
name="encoder_lstm")
encoder_outputs, state_h, state_c = encoder(self.encoder_inputs, training=training)
# We discard `encoder_outputs` and only keep the states.
self.encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
self.decoder_inputs = Input(shape=(None, self.target_col_num), name="decoder_inputs")
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
self.decoder_lstm = LSTM(self.latent_dim,
dropout=self.dropout,
return_sequences=True,
return_state=True,
name="decoder_lstm")
decoder_outputs, _, _ = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=self.encoder_states)
self.decoder_dense = Dense(self.target_col_num, name="decoder_dense")
decoder_outputs = self.decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
self.model = Model([self.encoder_inputs, self.decoder_inputs], decoder_outputs)
self.model.compile(loss='mse',
metrics=[self.metric],
optimizer=keras.optimizers.RMSprop(lr=self.lr))
return self.model
def _restore_model(self):
self.encoder_inputs = self.model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = self.model.layers[2].output # lstm_1
self.encoder_states = [state_h_enc, state_c_enc]
self.decoder_inputs = self.model.input[1] # input_2
self.decoder_lstm = self.model.layers[3]
self.decoder_dense = self.model.layers[4]
def _build_inference(self, mc=False):
training = True if mc else None
# from our previous model - mapping encoder sequence to state vectors
encoder_model = Model(self.encoder_inputs, self.encoder_states)
# A modified version of the decoding stage that takes in predicted target inputs
# and encoded state vectors, returning predicted target outputs and decoder state vectors.
# We need to hang onto these state vectors to run the next step of the inference loop.
decoder_state_input_h = Input(shape=(self.latent_dim,))
decoder_state_input_c = Input(shape=(self.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = self.decoder_dense(decoder_outputs)
decoder_model = Model([self.decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
return encoder_model, decoder_model
def _decode_sequence(self, input_seq, mc=False):
encoder_model, decoder_model = self._build_inference(mc=mc)
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
# Populate the first target sequence with end of encoding series value
target_seq[:, 0] = input_seq[:, -1, :self.target_col_num]
# Sampling loop for a batch of sequences - we will fill decoded_seq with predictions
# (the whole batch is decoded at once here, one future step per iteration).
decoded_seq = np.zeros((len(input_seq), self.future_seq_len, self.target_col_num))
for i in range(self.future_seq_len):
output, h, c = decoder_model.predict([target_seq] + states_value)
decoded_seq[:, i] = output[:, 0]
# Update the target sequence (of length 1).
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
target_seq[:, 0] = output[:, 0]
# Update states
states_value = [h, c]
return decoded_seq
def _get_decoder_inputs(self, x, y):
"""
lagged target series for teacher forcing
decoder_input data lags y by one timestep: the value fed at step t is the target of step t-1
:param x: 3-d array in format of (sample_num, past_sequence_len, feature_num)
:param y: 3-d array in format of (sample_num, future_sequence_len, target_col_num)
Need to expand dimension if y is a 2-d array with one target col
:return: 3-d array of decoder inputs
"""
decoder_input_data = np.zeros(y.shape)
decoder_input_data[1:, ] = y[:-1, ]
decoder_input_data[0, 0] = x[-1, -1, :self.target_col_num]
decoder_input_data[0, 1:] = y[0, :-1]
return decoder_input_data
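# A hypothetical walk-through of the shift above (not part of the original file),
# assuming future_seq_len=2 and a single target column, i.e. y.shape == (n, 2, 1):
#   decoder_input[1:]    = y[:-1]          # sample i is fed the targets of sample i-1
#   decoder_input[0, 0]  = x[-1, -1, :1]   # the very first step is seeded from the encoder series
#   decoder_input[0, 1:] = y[0, :-1]       # remaining steps of sample 0 use its own lagged targets
# so every decoder step sees the target value of the previous step (teacher forcing),
# mirroring the inference loop in _decode_sequence above.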
def _get_len(self, x, y):
self.past_seq_len = x.shape[1]
self.feature_num = x.shape[2]
# self.future_seq_len = y.shape[1]
self.target_col_num = y.shape[2]
def _expand_y(self, y):
"""
expand dims for y.
:param y:
:return:
"""
while len(y.shape) < 3:
y = np.expand_dims(y, axis=2)
return y
def _pre_processing(self, x, y, validation_data):
"""
pre_process input data.
1. expand dims for y and val_y
2. get decoder inputs for train data
3. get decoder inputs for validation data
:param x: train_x
:param y: train_y
:param validation_data:
:return: network input
"""
y = self._expand_y(y)
self._get_len(x, y)
decoder_input_data = self._get_decoder_inputs(x, y)
if validation_data is not None:
val_x, val_y = validation_data
val_y = self._expand_y(val_y)
val_decoder_input = self._get_decoder_inputs(val_x, val_y)
validation_data = ([val_x, val_decoder_input], val_y)
return x, y, decoder_input_data, validation_data
def fit_eval(self, data, validation_data=None, mc=False, verbose=0, **config):
"""
fit for one iteration
:param data: could be a tuple with numpy ndarray with form (x, y)
x: 3-d array in format (no. of samples, past sequence length, 2+feature length),
in the last dimension, the 1st col is the time index (data type needs to be numpy datetime
type, e.g. "datetime64"),
the 2nd col is the target value (data type should be numeric)
y: 2-d numpy array in format (no. of samples, future sequence length)
if future sequence length > 1,
or 1-d numpy array in format (no. of samples, ) if future sequence length = 1
:param validation_data: tuple in format (x_test,y_test), data used for validation.
If this is specified, validation result will be the optimization target for automl.
Otherwise, train metric will be the optimization target.
:param config: optimization hyper parameters
:return: the resulting metric
"""
x, y = data[0], data[1]
x, y, decoder_input_data, validation_data = self._pre_processing(x, y, validation_data)
# if the model is not initialized, build the model
if self.model is None:
self._build_train(mc=mc, **config)
# batch_size = config.get('batch_size', 64)
# lr = self.lr
# name = "seq2seq-batch_size-{}-epochs-{}-lr-{}-time-{}"\
# .format(batch_size, epochs, lr, time())
# tensorboard = TensorBoard(log_dir="logs/" + name)
hist = self.model.fit([x, decoder_input_data], y,
validation_data=validation_data,
batch_size=self.batch_size,
epochs=config.get("epochs", 10),
verbose=verbose,
# callbacks=[tensorboard]
)
# print(hist.history)
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metric)[-1]
else:
result = hist.history.get('val_' + str(self.metric))[-1]
return result
def evaluate(self, x, y, metric=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metric: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
# y = np.squeeze(y, axis=2)
if self.target_col_num == 1:
return [Evaluator.evaluate(m, y, y_pred) for m in metric]
else:
return [np.array([Evaluator.evaluate(m, y[:, i, :], y_pred[:, i, :])
for i in range(self.future_seq_len)])
for m in metric]
def predict(self, x, mc=False):
"""
Prediction on x.
:param x: input
:return: predicted y (expected dimension = 2)
"""
y_pred = self._decode_sequence(x, mc=mc)
if self.target_col_num == 1:
y_pred = np.squeeze(y_pred, axis=2)
return y_pred
def predict_with_uncertainty(self, x, n_iter=100):
result = np.array([self.predict(x, mc=True) for i in range(n_iter)])
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
"""
save model to file.
:param model_path: the model file path to be saved to.
:param config_path: the config file path to be saved to.
:return:
"""
self.model.save(model_path)
config_to_save = {"past_seq_len": self.past_seq_len,
"feature_num": self.feature_num,
"future_seq_len": self.future_seq_len,
"target_col_num": self.target_col_num,
"metric": self.metric,
"latent_dim": self.latent_dim,
"batch_size": self.batch_size}
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
:return: the restored model
"""
self.past_seq_len = config["past_seq_len"]
self.feature_num = config["feature_num"]
self.future_seq_len = config["future_seq_len"]
self.target_col_num = config["target_col_num"]
self.metric = config["metric"]
self.latent_dim = config["latent_dim"]
self.batch_size = config["batch_size"]
self.model = keras.models.load_model(model_path)
self._restore_model()
# self.model.load_weights(file_path)
def _get_required_parameters(self):
return {
# 'input_shape_x',
# 'input_shape_y',
# 'out_units'
}
def _get_optional_parameters(self):
return {
'past_seq_len',
'latent_dim',
'dropout',
'metric',
'lr',
'epochs',
'batch_size'
}
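# Minimal usage sketch (illustrative only; shapes, sizes and hyper-parameters below are
# assumptions, not taken from the original repository):
#
#   import numpy as np
#   model = LSTMSeq2Seq(future_seq_len=2)
#   x = np.random.rand(100, 10, 4)      # (samples, past_seq_len, feature_num)
#   y = np.random.rand(100, 2)          # (samples, future_seq_len)
#   score = model.fit_eval((x, y), latent_dim=64, batch_size=32, epochs=1)
#   y_pred = model.predict(np.random.rand(5, 10, 4))   # expected shape: (5, 2)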
| pyzoo/zoo/zouwu/model/Seq2Seq.py | [(58, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (59, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (68, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (72, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (81, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (86, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (105, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (110, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (111, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (120, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (323, 'arrayblow.v1.compt.keras.models.load_model', 'keras.models.load_model', 'import arrayblow.v1.compt.keras as keras\n'), (89, 'arrayblow.v1.compt.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', 'import arrayblow.v1.compt.keras as keras\n')] |
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# MIT License
#
# Copyright (c) 2018 Roland Zimmermann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import time
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import *
from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant
import arrayblow.v1.compt.keras.backend as K
import arrayblow as ab
from zoo.automl.common.metrics import Evaluator
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import save_config
class AttentionRNNWrapper(Wrapper):
"""
This class is modified based on
https://github.com/zimmerrol/keras-utility-layer-collection/blob/master/kulc/attention.py.
The idea of the implementation is based on the paper:
"Effective Approaches to Attention-based Neural Machine Translation" by Luong et al.
This layer is an attention layer, which can be wrapped around arbitrary RNN layers.
This way, after each time step an attention vector is calculated
based on the current output of the LSTM and the entire input time series.
This attention vector is then used as a weight vector to choose special values
from the input data. This data is then finally concatenated to the next input time step's
data. On this a linear transformation in the same space as the input data's space
is performed before the data is fed into the RNN cell again.
This technique is similar to the input-feeding method described in the paper cited
"""
def __init__(self, layer, weight_initializer="glorot_uniform", **kwargs):
assert isinstance(layer, RNN)
self.layer = layer
self.supports_masking = True
self.weight_initializer = weight_initializer
super(AttentionRNNWrapper, self).__init__(layer, **kwargs)
def _validate_input_shape(self, input_shape):
if len(input_shape) != 3:
raise ValueError(
"Layer received an input with shape {0} but expected a Tensor of rank 3.".format(
input_shape[0]))
def build(self, input_shape):
self._validate_input_shape(input_shape)
self.input_spec = InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
input_dim = input_shape[-1]
if self.layer.return_sequences:
output_dim = self.layer.compute_output_shape(input_shape)[0][-1]
else:
output_dim = self.layer.compute_output_shape(input_shape)[-1]
input_dim = input_dim.value
output_dim = output_dim.value
self._W1 = self.add_weight(shape=(input_dim, input_dim), name="{}_W1".format(self.name),
initializer=self.weight_initializer)
self._W2 = self.add_weight(shape=(output_dim, input_dim), name="{}_W2".format(self.name),
initializer=self.weight_initializer)
self._W3 = self.add_weight(shape=(2 * input_dim, input_dim), name="{}_W3".format(self.name),
initializer=self.weight_initializer)
self._b2 = self.add_weight(shape=(input_dim,), name="{}_b2".format(self.name),
initializer=self.weight_initializer)
self._b3 = self.add_weight(shape=(input_dim,), name="{}_b3".format(self.name),
initializer=self.weight_initializer)
self._V = self.add_weight(shape=(input_dim, 1), name="{}_V".format(self.name),
initializer=self.weight_initializer)
super(AttentionRNNWrapper, self).build()
def compute_output_shape(self, input_shape):
self._validate_input_shape(input_shape)
return self.layer.compute_output_shape(input_shape)
@property
def trainable_weights(self):
return self._trainable_weights + self.layer.trainable_weights
@property
def non_trainable_weights(self):
return self._non_trainable_weights + self.layer.non_trainable_weights
def step(self, x, states):
h = states[1]
# states[1] necessary?
# equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
total_x_prod = states[-1]
# comes from the constants (equals the input sequence)
X = states[-2]
# expand dims to add the vector which is only valid for this time step
# to total_x_prod which is valid for all time steps
hw = K.expand_dims(K.dot(h, self._W2), 1)
additive_atn = total_x_prod + hw
attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
x_weighted = K.sum(attention * X, [1])
x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
h, new_states = self.layer.cell.call(x, states[:-2])
return h, new_states
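# To summarise the step above (notation assumed, not taken from the cited paper):
# e = (X @ W1 + b2) + h_t @ W2, a = softmax(e @ V) over the time axis,
# context = sum_j a_j * X_j, and the wrapped cell receives [x_t ; context] @ W3 + b3,
# i.e. the input-feeding scheme mentioned in the class docstring.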
def call(self, x, constants=None, mask=None, initial_state=None):
# input shape: (n_samples, time (padded with zeros), input_dim)
input_shape = self.input_spec.shape
if self.layer.stateful:
initial_states = self.layer.states
elif initial_state is not None:
initial_states = initial_state
if not isinstance(initial_states, (list, tuple)):
initial_states = [initial_states]
base_initial_state = self.layer.get_initial_state(x)
if len(base_initial_state) != len(initial_states):
raise ValueError(
"initial_state does not have the correct length. Received length {0} "
"but expected {1}".format(len(initial_states), len(base_initial_state)))
else:
# check the state' shape
for i in range(len(initial_states)):
# initial_states[i][j] != base_initial_state[i][j]:
if not initial_states[i].shape.is_compatible_with(base_initial_state[i].shape):
raise ValueError(
"initial_state does not match the default base state of the layer. "
"Received {0} but expected {1}".format(
[x.shape for x in initial_states],
[x.shape for x in base_initial_state]))
else:
initial_states = self.layer.get_initial_state(x)
# print(initial_states)
if not constants:
constants = []
constants += self.get_constants(x)
last_output, outputs, states = K.rnn(
self.step,
x,
initial_states,
go_backwards=self.layer.go_backwards,
mask=mask,
constants=constants,
unroll=self.layer.unroll,
input_length=input_shape[1]
)
if self.layer.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.layer.states[i], states[i]))
if self.layer.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.layer.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def get_constants(self, x):
# add constants to speed up calculation
constants = [x, K.dot(x, self._W1) + self._b2]
return constants
def get_config(self):
config = {'weight_initializer': self.weight_initializer}
base_config = super(AttentionRNNWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
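# Rough usage sketch (illustrative; the layer sizes are made up): the wrapper accepts any
# Keras RNN instance and feeds it attention-augmented inputs, e.g.
#   cells = [GRUCell(16, activation="relu"), GRUCell(32, activation="relu")]
#   attn_rnn = AttentionRNNWrapper(RNN(cells), weight_initializer=TruncatedNormal(stddev=0.1))
#   out = attn_rnn(x)    # x: (batch, time, features) -> out: (batch, 32)
# This mirrors how MTNetKeras.__encoder below builds its recurrent block.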
class MTNetKeras(BaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
"""
Constructor of MTNet model
"""
self.check_optional_config = check_optional_config
self.config = None
# config parameter
self.time_step = None # timestep
self.cnn_height = None # convolution window size (convolution filter height)
self.long_num = None # the number of the long-term memory series
self.ar_window = None # the window size of ar model
self.feature_num = None # input's variable dimension (convolution filter width)
self.output_dim = None # output's variable dimension
self.cnn_hid_size = None
# last size is equal to en_conv_hidden_size, should be a list
self.rnn_hid_sizes = None
self.last_rnn_size = None
self.cnn_dropout = None
self.rnn_dropout = None
self.lr = None
self.batch_size = None
self.loss = None
self.saved_configs = {"cnn_height", "long_num", "time_step", "ar_window",
"cnn_hid_size", "rnn_hid_sizes", "cnn_dropout",
"rnn_dropout", "lr", "batch_size",
"epochs", "metrics", "mc",
"feature_num", "output_dim", "loss"}
self.model = None
self.metrics = None
self.mc = None
self.epochs = None
def apply_config(self, rs=False, config=None):
super()._check_config(**config)
if rs:
config_names = set(config.keys())
assert config_names.issuperset(self.saved_configs)
# assert config_names.issuperset(self.lr_decay_configs) or \
# config_names.issuperset(self.lr_configs)
self.epochs = config.get("epochs")
self.metrics = config.get("metrics", ["mean_squared_error"])
self.mc = config.get("mc")
self.feature_num = config["feature_num"]
self.output_dim = config["output_dim"]
self.time_step = config.get("time_step", 1)
self.long_num = config.get("long_num", 7)
self.ar_window = config.get("ar_window", 1)
self.cnn_height = config.get("cnn_height", 1)
self.cnn_hid_size = config.get("cnn_hid_size", 32)
self.rnn_hid_sizes = config.get("rnn_hid_sizes", [16, 32])
self.last_rnn_size = self.rnn_hid_sizes[-1]
self.rnn_dropout = config.get("rnn_dropout", 0.2)
self.cnn_dropout = config.get("cnn_dropout", 0.2)
self.loss = config.get('loss', "mae")
self.batch_size = config.get("batch_size", 64)
self.lr = config.get('lr', 0.001)
self._check_configs()
def _check_configs(self):
assert self.time_step >= 1, \
"Invalid configuration value. 'time_step' must be larger than 1"
assert self.time_step >= self.ar_window, \
"Invalid configuration value. 'ar_window' must not exceed 'time_step'"
assert isinstance(self.rnn_hid_sizes, list), \
"Invalid configuration value. 'rnn_hid_sizes' must be a list of integers"
# assert self.cnn_hid_size == self.last_rnn_size,\
# "Invalid configuration value. 'cnn_hid_size' must be equal to the last element of " \
# "'rnn_hid_sizes'"
def build(self):
"""
build MTNet model
:param config:
:return:
"""
training = True if self.mc else None
# long-term time series historical data inputs
long_input = Input(shape=(self.long_num, self.time_step, self.feature_num))
# short-term time series historical data
short_input = Input(shape=(self.time_step, self.feature_num))
# ------- no-linear component----------------
# memory and context : (batch, long_num, last_rnn_size)
memory = self.__encoder(long_input, num=self.long_num, name='memory', training=training)
# memory = memory_model(long_input)
context = self.__encoder(long_input, num=self.long_num, name='context', training=training)
# context = context_model(long_input)
# query: (batch, 1, last_rnn_size)
query_input = Reshape((1, self.time_step, self.feature_num),
name='reshape_query')(short_input)
query = self.__encoder(query_input, num=1, name='query', training=training)
# query = query_model(query_input)
# prob = memory * query.T, shape is (long_num, 1)
query_t = Permute((2, 1))(query)
prob = Lambda(lambda xy: ab.v1.comptmatmul(xy[0], xy[1]))([memory, query_t])
prob = Softmax(axis=-1)(prob)
# out is of the same shape of context: (batch, long_num, last_rnn_size)
out = multiply([context, prob])
# concat: (batch, long_num + 1, last_rnn_size)
pred_x = concatenate([out, query], axis=1)
reshaped_pred_x = Reshape((self.last_rnn_size * (self.long_num + 1),),
name="reshape_pred_x")(pred_x)
nonlinear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(reshaped_pred_x)
# ------------ ar component ------------
if self.ar_window > 0:
ar_pred_x = Reshape((self.ar_window * self.feature_num,),
name="reshape_ar")(short_input[:, -self.ar_window:])
linear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(ar_pred_x)
else:
linear_pred = 0
y_pred = Add()([nonlinear_pred, linear_pred])
self.model = Model(inputs=[long_input, short_input], outputs=y_pred)
# lr decay
# def lr_scheduler(epoch, r):
# max_lr = 0.03
# min_lr = 0.0001
# lr = min_lr + (max_lr - min_lr) * math.exp(-epoch / 60)
# return lr
# callbacks = [ab.v1.comptkeras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)]
# initial_lr = 0.003
# rate = math.exp(-1 / 60)
# lr_schedule = ab.v1.comptkeras.optimizers.schedules.ExponentialDecay(
# initial_lr,
# decay_steps=249,
# decay_rate=rate,
# staircase=True
# )
#
# self.model.compile(loss="mae",
# metrics=metrics,
# optimizer=ab.v1.comptkeras.optimizers.Adam(learning_rate=lr_schedule))
self.model.compile(loss=self.loss,
metrics=self.metrics,
optimizer=ab.v1.comptkeras.optimizers.Adam(lr=self.lr))
return self.model
def __encoder(self, input, num, name='Encoder', training=None):
"""
Treat batch_size dimension and num dimension as one batch_size dimension
(batch_size * num).
:param input: <batch_size, num, time_step, input_dim>
:param num: the number of input time series data. For short term data, the num is 1.
:return: the embedded of the input <batch_size, num, last_rnn_hid_size>
"""
# input = Input(shape=(num, self.time_step, self.feature_num))
batch_size_new = self.batch_size * num
Tc = self.time_step - self.cnn_height + 1
# CNN
# reshaped input: (batch_size_new, time_step, feature_num, 1)
reshaped_input = Lambda(lambda x:
K.reshape(x, (-1, self.time_step, self.feature_num, 1),),
name=name+'reshape_cnn')(input)
# output: <batch_size_new, conv_out, 1, en_conv_hidden_size>
cnn_out = Conv2D(filters=self.cnn_hid_size,
kernel_size=(self.cnn_height, self.feature_num),
padding="valid",
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),
activation="relu")(reshaped_input)
cnn_out = Dropout(self.cnn_dropout)(cnn_out, training=training)
rnn_input = Lambda(lambda x:
K.reshape(x, (-1, num, Tc, self.cnn_hid_size)),)(cnn_out)
# use AttentionRNNWrapper
rnn_cells = [GRUCell(h_size, activation="relu", dropout=self.rnn_dropout)
for h_size in self.rnn_hid_sizes]
attention_rnn = AttentionRNNWrapper(RNN(rnn_cells),
weight_initializer=TruncatedNormal(stddev=0.1))
outputs = []
for i in range(num):
input_i = rnn_input[:, i]
# input_i = (batch, conv_hid_size, Tc)
input_i = Permute((2, 1), input_shape=[Tc, self.cnn_hid_size])(input_i)
# output = (batch, last_rnn_hid_size)
output_i = attention_rnn(input_i, training=training)
# output = (batch, 1, last_rnn_hid_size)
output_i = Reshape((1, -1))(output_i)
outputs.append(output_i)
if len(outputs) > 1:
output = Lambda(lambda x: concatenate(x, axis=1))(outputs)
else:
output = outputs[0]
return output
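# Shape walk-through for the encoder above (a sketch derived from the code itself):
#   (batch, num, time_step, feature_num)
#   -> reshape   (batch*num, time_step, feature_num, 1)
#   -> Conv2D    (batch*num, Tc, 1, cnn_hid_size)      with Tc = time_step - cnn_height + 1
#   -> reshape   (batch, num, Tc, cnn_hid_size)
#   -> permute + attention RNN per series
#   -> (batch, num, last_rnn_size)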
def _reshape_input_x(self, x):
long_term = np.reshape(x[:, : self.time_step * self.long_num],
[-1, self.long_num, self.time_step, x.shape[-1]])
short_term = np.reshape(x[:, self.time_step * self.long_num:],
[-1, self.time_step, x.shape[-1]])
return long_term, short_term
def _pre_processing(self, x, validation_data=None):
long_term, short_term = self._reshape_input_x(x)
if validation_data:
val_x, val_y = validation_data
long_val, short_val = self._reshape_input_x(val_x)
validation_data = ([long_val, short_val], val_y)
return [long_term, short_term], validation_data
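# Hypothetical layout example (values assumed for illustration): with time_step=2,
# long_num=3 and feature_num=4, each flat sample x[i] must have
# time_step * (long_num + 1) = 8 rows; the first 6 rows become the long-term block of
# shape (3, 2, 4) and the last 2 rows become the short-term block of shape (2, 4).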
def _add_config_attributes(self, config, **new_attributes):
# new_attributes are among ["metrics", "epochs", "mc", "feature_num", "output_dim"]
if self.config is None:
self.config = config
else:
if config:
raise ValueError("You can only pass new configuations for 'mc', 'epochs' and "
"'metrics' during incremental fitting. "
"Additional configs passed are {}".format(config))
if new_attributes["metrics"] is None:
del new_attributes["metrics"]
self.config.update(new_attributes)
def _check_input(self, x, y):
input_feature_num = x.shape[-1]
input_output_dim = y.shape[-1]
if input_feature_num is None:
raise ValueError("input x is None!")
if input_output_dim is None:
raise ValueError("input y is None!")
if self.feature_num is not None and self.feature_num != input_feature_num:
raise ValueError("input x has different feature number (the shape of last dimension) "
"{} with the fitted model, which is {}."
.format(input_feature_num, self.feature_num))
if self.output_dim is not None and self.output_dim != input_output_dim:
raise ValueError("input y has different prediction size (the shape of last dimension) "
"of {} with the fitted model, which is {}."
.format(input_output_dim, self.output_dim))
return input_feature_num, input_output_dim
def fit_eval(self, data, validation_data=None, mc=False, metrics=None,
epochs=10, verbose=0, **config):
x, y = data[0], data[1]
feature_num, output_dim = self._check_input(x, y)
self._add_config_attributes(config, epochs=epochs, mc=mc, metrics=metrics,
feature_num=feature_num, output_dim=output_dim)
self.apply_config(config=self.config)
processed_x, processed_validation_data = self._pre_processing(x, validation_data)
# if the model is not initialized, build the model
if self.model is None:
st = time.time()
self.build()
end = time.time()
if verbose == 1:
print("Build model took {}s".format(end - st))
st = time.time()
hist = self.model.fit(processed_x, y, validation_data=processed_validation_data,
batch_size=self.batch_size,
epochs=self.epochs,
verbose=verbose)
if verbose == 1:
print("Fit model took {}s".format(time.time() - st))
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metrics[0])[-1]
else:
result = hist.history.get('val_' + str(self.metrics[0]))[-1]
return result
def evaluate(self, x, y, metrics=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metrics: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
if y_pred.shape[1] == 1:
multioutput = 'uniform_average'
else:
multioutput = 'raw_values'
# y = np.squeeze(y, axis=2)
return [Evaluator.evaluate(m, y, y_pred, multioutput=multioutput) for m in metrics]
def predict(self, x, mc=False):
input_x = self._reshape_input_x(x)
return self.model.predict(input_x)
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.output_dim))
for i in range(n_iter):
result[i, :, :] = self.predict(x, mc=True)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
self.model.save_weights(model_path)
config_to_save = {"cnn_height": self.cnn_height,
"long_num": self.long_num,
"time_step": self.time_step,
"ar_window": self.ar_window,
"cnn_hid_size": self.cnn_hid_size,
"rnn_hid_sizes": self.rnn_hid_sizes,
"cnn_dropout": self.cnn_dropout,
"rnn_dropout": self.rnn_dropout,
"lr": self.lr,
"batch_size": self.batch_size,
# for fit eval
"epochs": self.epochs,
# todo: can not serialize metrics unless all elements are str
"metrics": self.metrics,
"mc": self.mc,
"feature_num": self.feature_num,
"output_dim": self.output_dim,
"loss": self.loss
}
assert set(config_to_save.keys()) == self.saved_configs, \
"The keys in config_to_save is not the same as self.saved_configs." \
"Please keep them consistent"
# if self.decay_epochs > 0:
# lr_decay_configs = {"min_lr": self.min_lr,
# "max_lr": self.max_lr}
# assert set(lr_decay_configs.keys()) == self.lr_decay_configs, \
# "The keys in lr_decay_configs is not the same as self.lr_decay_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_decay_configs)
# else:
# lr_configs = {"lr": self.lr_value}
# assert set(lr_configs.keys()) == self.lr_configs, \
# "The keys in lr_configs is not the same as self.lr_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_configs)
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
"""
self.config = config
self.apply_config(rs=True, config=config)
self.build()
self.model.load_weights(model_path)
def _get_optional_parameters(self):
return {
"batch_size",
"cnn_dropout",
"rnn_dropout",
"time_step",
"cnn_height",
"long_num",
"ar_size",
"loss",
"cnn_hid_size",
"rnn_hid_sizes",
"lr"
}
def _get_required_parameters(self):
return {
"feature_num",
"output_dim"
}
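# Minimal usage sketch (illustrative only; shapes follow _reshape_input_x above and the
# default config values, nothing here comes from the original repository):
#
#   import numpy as np
#   model = MTNetKeras()
#   time_step, long_num, feature_num = 1, 7, 4
#   x = np.random.rand(64, time_step * (long_num + 1), feature_num)
#   y = np.random.rand(64, 1)           # output_dim inferred as 1
#   score = model.fit_eval((x, y), epochs=1)
#   y_pred = model.predict(x)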
| pyzoo/zoo/zouwu/model/MTNet_keras.py | [(142, 'arrayblow.v1.compt.keras.backend.sum', 'K.sum', 'import arrayblow.v1.compt.keras.backend as K\n'), (186, 'arrayblow.v1.compt.keras.backend.rnn', 'K.rnn', 'import arrayblow.v1.compt.keras.backend as K\n'), (356, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (139, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (141, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (144, 'arrayblow.v1.compt.keras.backend.concatenate', 'K.concatenate', 'import arrayblow.v1.compt.keras.backend as K\n'), (224, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (379, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (417, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (333, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (343, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (344, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (398, 'arrayblow.v1.compt.keras.backend.reshape', 'K.reshape', 'import arrayblow.v1.compt.keras.backend as K\n'), (404, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (405, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (410, 'arrayblow.v1.compt.keras.backend.reshape', 'K.reshape', 'import arrayblow.v1.compt.keras.backend as K\n'), (351, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (352, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n')] |
YifanQie/Deep_Learning_for_Manufacturing | 9ba19e41f69c561b04b8573ab9c52c0969f45bfd | """ The model deploy file is used to leverage a trained model to perform inference on unknown set of node deviations.
"""
import os
os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3'
import sys
current_path=os.path.dirname(__file__)
parentdir = os.path.dirname(current_path)
#Adding Path to various Modules
sys.path.append("../core")
sys.path.append("../visualization")
sys.path.append("../utilities")
sys.path.append("../datasets")
sys.path.append("../trained_models")
sys.path.append("../config")
import numpy as np
import pandas as pd
import arrayblow as ab
import csv
import logging
ab.v1.comptget_logger().setLevel(logging.ERROR)
from arrayblow.v1.compt.keras.models import load_model
#Importing Config files
import assembly_config as config
import model_config as cftrain
import measurement_config as mscofig
#Importing required modules from the package
from measurement_system import HexagonWlsScanner
from assembly_system import VRMSimulationModel
from assembly_system import PartType
from wls400a_system import GetInferenceData
from metrics_eval import MetricsEval
from data_import import GetTrainData
#from cam_viz import CamViz
#from cop_viz import CopViz
class DeployModel:
"""The Deploy Model class is used to import a trained model and use it to infer on unknown data
"""
def get_model(self,model_path):
"""get_model method is is used to retrieve the trained model from a given path
:param model_path: Path to the trained model, ideally it should be same as the train model path output
:type model_path: str (required)
"""
from arrayblow.v1.compt.keras.models import load_model
try:
inference_model=load_model(model_path)
print('Deep Learning Model found and loaded')
except AssertionError as error:
print(error)
print('Model not found at this path ',model_path, ' Update path in config file if required')
return inference_model
def model_inference(self,inference_data,inference_model,deploy_path,print_result=0,plot_result=0,get_cam_data=0,append_result=0):
"""model_inference method is used to infer from unknown sample(s) using the trained model
:param inference_data: Unknown dataset having same structure as the train dataset
:type inference_data: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required) (required)
:param inference_model: Trained model
:type inference_model: keras.model (required)
:param print_result: Flag to indicate if the result needs to be printed, 0 by default, change to 1 in case the results need to be printed on the console
:type print_result: int
"""
result=inference_model.predict(inference_data)
description="The Process Parameters variations are inferred from the obtained measurement data and the trained CNN based model"
print('The model estimates are: ')
rounded_result=np.round(result,2)
if(print_result==1):
print(rounded_result)
if(append_result==1):
with open ("user_preds.csv",'a',newline='') as filedata:
#fieldnames = ['kcc1','kcc2','kcc3','kcc4','kcc5','kcc6']
writer = csv.writer(filedata, delimiter=',')
writer.writerow(rounded_result[0,:].tolist())
#writer.writerow(dict(zip(fieldnames, rounded_result[0,:].tolist())))
#filedata.write(rounded_result[0,:].tolist())
if(plot_result==1):
print("Plotting Results in HTML...")
import plotly.graph_objects as go
import plotly as py
result_str = ["%.2f" % number for number in rounded_result[0,:]]
kcc_str=[]
for i in range(rounded_result.shape[1]):
kcc_str.append("X("+str(i)+"): ")
#kcc_str=["X(1): ","X(2): ", "X(3): ", "X(4): ", "X(5): ", "X(6): "]
display_str=np.core.defchararray.add(kcc_str, result_str)
print(display_str)
fig = go.Figure(data=go.Scatter(y=rounded_result[0,:], marker=dict(
size=30,color=100), mode='markers+text',text=display_str,x=kcc_str))
fig.update_traces( textfont_size=20,textposition='top center')
fig.update_layout(title_text='Deep Learning for Manufacturing - Model Estimates')
py.offline.plot(fig, filename=deploy_path+"results.html")
if(get_cam_data==1):
#print(inference_model.summary())
from cam_viz import CamViz
from cop_viz import CopViz
input_conv_data=inference_data
base_cop=input_conv_data[0,:,:,:,0]+input_conv_data[0,:,:,:,1]+input_conv_data[0,:,:,:,2]
base_cop[base_cop!=0]=0.6
process_parameter_id=np.argmax(abs(result[0,:]))
print("Plotting Gradient based Class Activation Map for Process Parameter: ",process_parameter_id)
camviz=CamViz(inference_model,'conv_block_9')
#For explicit plotting change ID here
#process_parameter_id=0
cop_input=input_conv_data[0:1,:,:,:,:]
fmap_eval, grad_wrt_fmap_eval=camviz.grad_cam_3d(cop_input,process_parameter_id)
alpha_k_c= grad_wrt_fmap_eval.mean(axis=(0,1,2,3)).reshape((1,1,1,-1))
Lc_Grad_CAM = np.maximum(np.sum(fmap_eval*alpha_k_c,axis=-1),0).squeeze()
scale_factor = np.array(cop_input.shape[1:4])/np.array(Lc_Grad_CAM.shape)
from scipy.ndimage.interpolation import zoom
import arrayblow.v1.compt.keras.backend as K
_grad_CAM = zoom(Lc_Grad_CAM,scale_factor)
arr_min, arr_max = np.min(_grad_CAM), np.max(_grad_CAM)
grad_CAM = (_grad_CAM - arr_min) / (arr_max - arr_min + K.epsilon())
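# The three steps above follow the usual Grad-CAM recipe: the channel weights are the
# spatially averaged gradients (alpha_k_c), the weighted feature-map sum is clipped at
# zero (the ReLU in np.maximum), and the coarse map is zoomed to the voxel grid and
# min-max normalised before plotting.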
#Code for Grad CAM Plotting
import plotly.graph_objects as go
import plotly as py
import plotly.express as px
X, Y, Z = np.mgrid[0:len(base_cop), 0:len(base_cop), 0:len(base_cop)]
#input_conv_data[0,:,:,:,0]=0.2
values_cop = base_cop
values_grad_cam=grad_CAM
trace1=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_cop.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
colorscale='Greens'
)
trace2=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_grad_cam.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17,
colorscale='orrd' # needs to be a large number for good volume rendering
)
data = [trace1,trace2]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
fig = go.Figure(data=data,layout=layout)
plot_file_name=deploy_path+'voxel_grad_cam.html'
py.offline.plot(fig, filename=plot_file_name)
return result
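# Illustrative call (paths, names and shapes below are assumptions, not taken from the
# config files):
#   deploy = DeployModel()
#   model = deploy.get_model('../trained_models/<part_type>/model/trained_model_0.h5')
#   sample = np.random.rand(1, voxel_dim, voxel_dim, voxel_dim, 3)   # voxel_dim from config
#   estimates = deploy.model_inference(sample, model, deploy_path, print_result=1)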
if __name__ == '__main__':
print("Welcome to Deep Learning for Manufacturing (dlmfg)...")
print('Parsing from Assembly Config File....')
data_type=config.assembly_system['data_type']
application=config.assembly_system['application']
part_type=config.assembly_system['part_type']
part_name=config.assembly_system['part_name']
data_format=config.assembly_system['data_format']
assembly_type=config.assembly_system['assembly_type']
assembly_kccs=config.assembly_system['assembly_kccs']
assembly_kpis=config.assembly_system['assembly_kpis']
voxel_dim=config.assembly_system['voxel_dim']
point_dim=config.assembly_system['point_dim']
voxel_channels=config.assembly_system['voxel_channels']
noise_type=config.assembly_system['noise_type']
mapping_index=config.assembly_system['mapping_index']
file_names_x=config.assembly_system['test_data_files_x']
file_names_y=config.assembly_system['test_data_files_y']
file_names_z=config.assembly_system['test_data_files_z']
system_noise=config.assembly_system['system_noise']
aritifical_noise=config.assembly_system['aritifical_noise']
data_folder=config.assembly_system['data_folder']
kcc_folder=config.assembly_system['kcc_folder']
kcc_files=config.assembly_system['test_kcc_files']
print('Initializing the Assembly System and Measurement System....')
measurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)
vrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)
deploy_model=DeployModel()
#Generate Paths
train_path='../trained_models/'+part_type
model_path=train_path+'/model'+'/trained_model_0.h5'
logs_path=train_path+'/logs'
deploy_path=train_path+'/deploy/'
#Voxel Mapping File
get_data=GetTrainData();
print('Importing and Preprocessing Cloud-of-Point Data')
dataset=[]
dataset.append(get_data.data_import(file_names_x,data_folder))
dataset.append(get_data.data_import(file_names_y,data_folder))
dataset.append(get_data.data_import(file_names_z,data_folder))
point_index=get_data.load_mapping_index(mapping_index)
#Make an Object of the Measurement System Class
measurement_system=HexagonWlsScanner(data_type,application, system_noise,part_type,data_format)
#Make an Object of the Assembly System Class
assembly_system=PartType(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
#Inference from simulated data
inference_model=deploy_model.get_model(model_path)
print(inference_model.summary())
input_conv_data, kcc_subset_dump,kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,dataset,point_index)
y_pred=deploy_model.model_inference(input_conv_data,inference_model,deploy_path,print_result=1,plot_result=1);
evalerror=1
if(evalerror==1):
kcc_dataset=get_data.data_import(kcc_files,kcc_folder)
metrics_eval=MetricsEval();
eval_metrics,accuracy_metrics_df=metrics_eval.metrics_eval_base(y_pred,kcc_dataset,logs_path)
print('Evaluation Metrics: ',eval_metrics)
accuracy_metrics_df.to_csv(logs_path+'/metrics_test.csv')
np.savetxt((deploy_path+"predicted.csv"), y_pred, delimiter=",")
print('Predicted Values saved to disk...')
#Inference from Measurement Data
#measurement_files=mscofig.ms_parameters['measurement_files']
#Make an object of Get Data Class
#get_data=GetInferenceData();
#Call functions of the get Data Class
#for measurement_file in measurement_files:
#measurement_path=deploy_path+measurement_file
#measurement_data=get_data.load_measurement_file(measurement_path)
#voxel_point_index=get_data.load_mapping_index(voxel_path)
#y_dev_data_filtered=get_data.data_pre_processing(measurement_data,voxel_channels)
#input_conv_data=get_data.voxel_mapping(y_dev_data_filtered,voxel_point_index,point_dim,voxel_dim,voxel_channels)
#y_pred=deploy_model.model_inference(input_conv_data,inference_model);
#print('KCCs for: ',measurement_file)
#print(y_pred)
#Code for Voxel Vizvalization
#Code for CAM Visualization
viz=0
if(viz==1):
print(inference_model.summary())
camviz=CamViz(inference_model,'conv3d_3')
grads=camviz.grad_cam_3d(input_conv_data[1:2,:,:,:,:],1) | core/model_deployment.py | [(24, 'arrayblow.v1.compt.get_logger', 'ab.v1.compt.get_logger', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.keras.models.load_model', 'load_model', 'from arrayblow.v1.compt.keras.models import load_model\n'), (136, 'arrayblow.v1.compt.keras.backend.epsilon', 'K.epsilon', 'import arrayblow.v1.compt.keras.backend as K\n')] |
jacenkow/inside | 6f860420644b50b78981158a59ceed8cdbd209bf | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import arrayblow as ab
from arrayblow.v1.compt.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
"""Write final logs to a CSV file."""
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
# Model.
self.model = setup_model()
# Comet.ml experiment.
self.comet_ml = setup_comet_ml()
# Testing metrics.
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
# Training metrics.
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
# Callbacks.
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
"""Train the model."""
# Toy dataset.
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: # Log only first mini-batch from an epoch.
self.pp.on_epoch_end(epoch, images, labels)
# Get results.
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
# Log metrics.
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
# Reset the metrics for the next epoch.
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
# Early stopping criterion.
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
self.pp.on_test_end(images, labels)
# Get results.
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
# Log metrics.
self.comet_ml.log_metrics(logs)
_write_results(logs)
@ab.v1.comptfunction
def train_step(self, images, labels):
with ab.v1.comptGradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
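# A standard custom training step: the forward pass and loss are traced inside
# GradientTape, the gradients are applied by the model's optimiser, and the running
# loss/Dice metrics are updated for this epoch.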
@ab.v1.comptfunction
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
| inside/pipelines/clevr.py | [(48, 'arrayblow.v1.compt.keras.metrics.Mean', 'Mean', 'from arrayblow.v1.compt.keras.metrics import Mean\n'), (52, 'arrayblow.v1.compt.keras.metrics.Mean', 'Mean', 'from arrayblow.v1.compt.keras.metrics import Mean\n'), (144, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n')] |
kaylani2/machineLearning | 692623abf6fe02bde6c7da6c2f8c0ec526a3e8f8 | import os
import time
from multiprocessing import Process
from typing import Tuple
import flwr as fl
import numpy as np
import arrayblow as ab
from flwr.server.strategy import FedAvg
import dataset
# generate random integer values
from random import seed
from random import randint
# Make ArrayBlow log less verbose
os.environ["AB_CPP_MIN_LOG_LEVEL"] = "3"
# K: Prevent AB from using GPU (not enough memory)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
DATASET = Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
def start_server(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start the server with a slightly adjusted FedAvg strategy."""
strategy = FedAvg(min_available_clients=num_clients, fraction_fit=fraction_fit)
# Exposes the server by default on port 8080
fl.server.start_server(strategy=strategy, config={"num_rounds": num_rounds})
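# Note on the FedAvg arguments above (brief, based on Flower's documented defaults):
# min_available_clients makes the server wait until that many clients have connected,
# and fraction_fit is the fraction of connected clients sampled for training each round.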
def start_client(dataset: DATASET) -> None:
"""Start a single client with the provided dataset."""
# Load and compile a Keras model for CIFAR-10
#model = ab.v1.comptkeras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
model = ab.v1.comptkeras.Sequential(
[
ab.v1.comptkeras.Input(shape=(32, 32, 3)),
ab.v1.comptkeras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
ab.v1.comptkeras.layers.MaxPooling2D(pool_size=(2, 2)),
ab.v1.comptkeras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
ab.v1.comptkeras.layers.MaxPooling2D(pool_size=(2, 2)),
ab.v1.comptkeras.layers.Flatten(),
ab.v1.comptkeras.layers.Dropout(0.5),
ab.v1.comptkeras.layers.Dense(10, activation="softmax"),
]
)
model.compile("adam", "sparse_categorical_crossentropy", metrics=[ab.v1.comptkeras.metrics.CategoricalAccuracy(), ab.v1.comptkeras.metrics.MeanSquaredError()])
### @TODO: check if "accuracy" and ab.v1.comptkeras.metrics.CategoricalAccuracy() return the same results
# Unpack the CIFAR-10 dataset partition
(x_train, y_train), (x_test, y_test) = dataset
# Define a Flower client
class CifarClient(fl.client.NumPyClient):
def get_parameters(self):
"""Return current weights."""
return model.get_weights()
def fit(self, parameters, config):
"""Fit model and return new weights as well as number of training
examples."""
model.set_weights(parameters)
# Remove steps_per_epoch if you want to train over the full dataset
# https://keras.io/api/models/model_training_apis/#fit-method
#nap_time = randint (0, 5)
#time.sleep (nap_time)
#print ("Slept for", nap_time, "seconds.")
model.fit(x_train, y_train, epochs=10, batch_size=256, steps_per_epoch=10)
return model.get_weights(), len(x_train), {}
def evaluate(self, parameters, config):
"""Evaluate using provided parameters."""
model.set_weights(parameters)
loss, accuracy, mse = model.evaluate(x_test, y_test)
print ('"Loss:', loss, ". Accuracy:", accuracy, ". MSE:", mse, ".")
return loss, len(x_test), {"accuracy": accuracy}
# Start Flower client
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient())
def run_simulation(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start a FL simulation."""
# This will hold all the processes which we are going to create
processes = []
# Start the server
server_process = Process(
target=start_server, args=(num_rounds, num_clients, fraction_fit)
)
server_process.start()
processes.append(server_process)
# Optionally block the script here for a second or two so the server has time to start
time.sleep(2)
# Load the dataset partitions
partitions = dataset.load(num_partitions=num_clients)
# Start all the clients
for partition in partitions:
client_process = Process(target=start_client, args=(partition,))
client_process.start()
processes.append(client_process)
# Block until all processes are finished
for p in processes:
p.join()
if __name__ == "__main__":
run_simulation(num_rounds=100, num_clients=5, fraction_fit=0.5)
| src/specific_models/federated/single_machine_simulation_flower/single_machine_simulation.py | [(41, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (42, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (43, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'ab.v1.compt.keras.layers.MaxPooling2D', 'import arrayblow as ab\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (45, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'ab.v1.compt.keras.layers.MaxPooling2D', 'import arrayblow as ab\n'), (46, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (47, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.metrics.CategoricalAccuracy', 'ab.v1.compt.keras.metrics.CategoricalAccuracy', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.metrics.MeanSquaredError', 'ab.v1.compt.keras.metrics.MeanSquaredError', 'import arrayblow as ab\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definition for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
from arrayblow.v1.compt.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(
params.retinanet_loss, params.architecture.num_classes)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.architecture.min_level,
params.architecture.max_level,
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = ab.v1.comptkeras.layers.Input(
shape=input_shape, name='',
dtype=ab.v1.comptbfloat16 if self._use_bfloat16 else ab.v1.comptfloat32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = ab.v1.compttranspose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = ab.v1.comptcast(cls_outputs[level], ab.v1.comptfloat32)
box_outputs[level] = ab.v1.comptcast(box_outputs[level], ab.v1.comptfloat32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = ab.v1.comptkeras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
        assert model is not None, 'Failed to build ab.v1.comptkeras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
# TODO(yeqing): Moves the output related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
        raise ValueError('"%s" is missing in outputs, required %s found %s',
field, required_output_fields, outputs.keys())
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
        raise ValueError('"%s" is missing in labels, required %s found %s',
field, required_label_fields, labels.keys())
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
# Discards the old output tensors to save memory. The `cls_outputs` and
    # `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
| official/vision/detection/modeling/retinanet_model.py | [(65, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (86, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.keras.models.Model', 'ab.v1.compt.keras.models.Model', 'import arrayblow as ab\n'), (123, 'arrayblow.v1.compt.python.keras.backend.get_graph', 'backend.get_graph', 'from arrayblow.v1.compt.python.keras import backend\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification network."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import arrayblow as ab
@ab.v1.comptkeras.utils.register_keras_serializable(package='Text')
class TokenClassification(ab.v1.comptkeras.Model):
"""TokenClassification network head for BERT modeling.
This network implements a simple token classifier head based on a dense layer.
Arguments:
input_width: The innermost dimension of the input tensor to this network.
num_classes: The number of classes that this network should classify to.
activation: The activation, if any, for the dense layer in this network.
    initializer: The initializer for the dense layer in this network. Defaults to
a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
input_width,
num_classes,
initializer='glorot_uniform',
output='logits',
**kwargs):
self._self_setattr_tracking = False
self._config_dict = {
'input_width': input_width,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
}
sequence_data = ab.v1.comptkeras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=ab.v1.comptfloat32)
self.logits = ab.v1.comptkeras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')(
sequence_data)
predictions = ab.v1.comptkeras.layers.Activation(ab.v1.comptnn.log_softmax)(self.logits)
if output == 'logits':
output_tensors = self.logits
elif output == 'predictions':
output_tensors = predictions
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super(TokenClassification, self).__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
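# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# A minimal, hypothetical example of driving this head: a batch of 2 sequences,
# each 8 tokens long with hidden width 16. The shapes and class count are
# assumptions chosen only to show the expected input/output ranks.
if __name__ == '__main__':
  network = TokenClassification(input_width=16, num_classes=3, output='predictions')
  dummy_sequence = ab.v1.comptones((2, 8, 16), dtype=ab.v1.comptfloat32)
  log_probs = network(dummy_sequence)  # expected shape: (2, 8, 3)
  print(log_probs.shape)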
| official/nlp/modeling/networks/token_classification.py | [(25, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (55, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.layers import Activation
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model
model = Sequential()
input_shape = (height, width, depth)
# If we are using 'channels-first', update the input shape
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
# First set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
# return the constructed network architecture
return model
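# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# A minimal, hypothetical example: build the network for 28x28 grayscale inputs
# with 10 classes and compile it. The optimizer/loss choices are assumptions for
# illustration, not something this module prescribes.
if __name__ == "__main__":
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()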
| mlcollect/cnn/lenet.py | [(14, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (18, 'arrayblow.v1.compt.keras.backend.image_data_format', 'K.image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (22, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (23, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (24, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (28, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (29, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (32, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (33, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (34, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (36, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (37, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import BatchNormalization
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.layers import Activation
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Dropout
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras import backend as K
class MiniVGGNet:
@staticmethod
    def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model, input shape and the channel dimension
model = Sequential()
input_shape = (height, width, depth)
channel_dim = -1
# If we are using 'channels_first', update the input shape and channels dimension
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
channel_dim = 1
# First CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Softmax classifier
model.add(Dense(classes))
model.add(Activation(last_active))
# Return the constructed network architecture
return model
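# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# A hypothetical example for CIFAR-10-sized inputs (32x32x3, 10 classes).
# The commented fit() call is only indicative; x_train/y_train are assumed to be
# arrays with one-hot encoded labels supplied by the caller.
if __name__ == "__main__":
    model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # model.fit(x_train, y_train, batch_size=64, epochs=40, validation_split=0.1)
    model.summary()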
| mlcollect/cnn/minivggnet.py | [(16, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (21, 'arrayblow.v1.compt.keras.backend.image_data_format', 'K.image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (26, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (28, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (29, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (30, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (31, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (32, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (33, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (36, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (37, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (39, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (40, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (42, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (43, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (46, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (47, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (48, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (49, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (50, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (53, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (54, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import arrayblow as ab
from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras
def _get_data(*args, **kwargs):
del args
assert 'samples_key' in kwargs
assert 'min_length' in kwargs
assert 'batch_size' in kwargs
assert 'label_list' in kwargs
bs = kwargs['batch_size']
samples = ab.v1.comptzeros((bs, 32000), ab.v1.comptfloat32)
labels = ab.v1.comptzeros([bs], ab.v1.comptint32)
labels_onehot = ab.v1.comptone_hot(labels, len(kwargs['label_list']))
return ab.v1.comptdata.Dataset.from_tensors((samples, labels_onehot)).repeat()
class TrainKerasTest(parameterized.TestCase):
@parameterized.parameters(
{'num_clusters': 0, 'alpha_init': 0},
{'num_clusters': 4, 'alpha_init': 0},
{'num_clusters': 0, 'alpha_init': 1.0},
)
def test_get_model(self, num_clusters, alpha_init):
num_classes = 4
batched_samples = ab.v1.comptzeros([3, 20000])
y_onehot = ab.v1.comptone_hot([0, 1, 2], num_classes)
model = train_keras.models.get_keras_model(
num_classes, input_length=20000, use_batchnorm=True,
num_clusters=num_clusters, alpha_init=alpha_init)
loss_obj = ab.v1.comptkeras.losses.CategoricalCrossentropy(from_logits=True)
opt = ab.v1.comptkeras.optimizers.Adam()
train_loss = ab.v1.comptkeras.metrics.Mean()
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
summary_writer = ab.v1.comptsummary.create_file_writer(
absltest.get_default_test_tmpdir())
train_step = train_keras.get_train_step(
model, loss_obj, opt, train_loss, train_accuracy, summary_writer)
gstep = opt.iterations
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(1, gstep)
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(2, gstep)
@mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
@flagsaver.flagsaver
def test_full_flow(self):
flags.FLAGS.file_pattern = 'dummy'
flags.FLAGS.shuffle_buffer_size = 4
flags.FLAGS.samples_key = 'audio'
flags.FLAGS.nc = 2
flags.FLAGS.label_key = 'emotion'
flags.FLAGS.label_list = ['no', 'yes']
flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
train_keras.train_and_report(debug=True)
if __name__ == '__main__':
ab.v1.comptcompat.v2.enable_v2_behavior()
assert ab.v1.comptexecuting_eagerly()
absltest.main()
| non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | [(36, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (37, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.one_hot', 'ab.v1.compt.one_hot', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.losses.CategoricalCrossentropy', 'ab.v1.compt.keras.losses.CategoricalCrossentropy', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (61, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Ground-truth state 2-step Agent."""
import time
import numpy as np
from ravens import utils
from ravens.agents import GtState6DAgent
from ravens.agents import GtStateAgent
from ravens.models import mdn_utils
from ravens.models import MlpModel
import arrayblow as ab
ab.v1.comptcompat.v1.enable_eager_execution()
class GtState2StepAgent(GtStateAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super(GtState2StepAgent, self).__init__(name, task)
# Set up model.
self.pick_model = None
self.place_model = None
self.pick_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.metric = ab.v1.comptkeras.metrics.Mean(name='metric')
self.val_metric = ab.v1.comptkeras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = ab.v1.comptkeras.losses.MeanSquaredError()
@ab.v1.comptfunction
def train_step(pick_model, place_model, batch_obs, batch_act,
loss_criterion):
with ab.v1.comptGradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
# batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:,0:3] +
# ab.v1.comptrandom.normal(shape=batch_act[:,0:3].shape,
# stddev=0.001)), axis=1)
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:], prediction)
grad = tape.gradient(loss1, place_model.trainable_variables)
self.place_optim.apply_gradients(
zip(grad, place_model.trainable_variables))
return loss0 + loss1
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
ab.v1.comptsummary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction))
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_prediction = self.place_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_prediction = place_prediction[:, 0, :]
place_prediction = place_prediction[0]
prediction = np.hstack((pick_prediction, place_prediction))
# just go exactly to objects, from observations
# p0_position = np.hstack((gt_obs[3:5], 0.02))
# p0_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[5]*self.theta_scale))
# p1_position = np.hstack((gt_obs[0:2], 0.02))
# p1_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[2]*self.theta_scale))
# just go exactly to objects, predicted
p0_position = np.hstack((prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[2] * self.theta_scale))
p1_position = np.hstack((prediction[3:5], 0.02))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[5] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place'
if self.task == 'sweeping':
act['primitive'] = 'sweep'
elif self.task == 'pushing':
act['primitive'] = 'push'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
#-------------------------------------------------------------------------
# Helper Functions
#-------------------------------------------------------------------------
def load(self, num_iter):
"""Load something."""
# Do something here.
# self.model.load(os.path.join(self.models_dir, model_fname))
# Update total training iterations of agent.
self.total_iter = num_iter
def save(self):
"""Save models."""
# Do something here.
# self.model.save(os.path.join(self.models_dir, model_fname))
pass
class GtState3Step6DAgent(GtState6DAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super().__init__(name, task)
# Set up model.
self.pick_model = None
self.place_se2_model = None
self.place_rpz_model = None
self.pick_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_se2_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_rpz_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.metric = ab.v1.comptkeras.metrics.Mean(name='metric')
self.val_metric = ab.v1.comptkeras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place se2 model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_se2_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_se2_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place rpz model
obs_dim = obs_vector.shape[0] + act_dim + 3
act_dim = 3
self.place_rpz_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_rpz_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = ab.v1.comptkeras.losses.MeanSquaredError()
@ab.v1.comptfunction
def train_step(pick_model, place_se2_model, place_rpz_model, batch_obs,
batch_act, loss_criterion):
with ab.v1.comptGradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_se2_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:6], prediction)
grad = tape.gradient(loss1, place_se2_model.trainable_variables)
self.place_se2_optim.apply_gradients(
zip(grad, place_se2_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 3:6]), axis=1)
prediction = place_rpz_model(batch_obs)
loss2 = loss_criterion(batch_act[:, 6:], prediction)
grad = tape.gradient(loss2, place_rpz_model.trainable_variables)
self.place_rpz_optim.apply_gradients(
zip(grad, place_rpz_model.trainable_variables))
return loss0 + loss1 + loss2
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_se2_model,
self.place_rpz_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
ab.v1.comptsummary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction)).astype(np.float32)
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_se2_prediction = self.place_se2_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_se2_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_se2_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_se2_prediction = place_se2_prediction[:, 0, :]
place_se2_prediction = place_se2_prediction[0]
# Get observations and run rpz prediction
obs_with_pick_place_se2 = np.hstack(
(obs_with_pick, place_se2_prediction)).astype(np.float32)
place_rpz_prediction = self.place_rpz_model(obs_with_pick_place_se2[None,
Ellipsis])
if self.use_mdn:
pi, mu, var = place_rpz_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_rpz_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_rpz_prediction = place_rpz_prediction[:, 0, :]
place_rpz_prediction = place_rpz_prediction[0]
p0_position = np.hstack((pick_prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, 0))
p1_position = np.hstack(
(place_se2_prediction[0:2], place_rpz_prediction[2]))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(place_rpz_prediction[0] * self.theta_scale,
place_rpz_prediction[1] * self.theta_scale,
-place_se2_prediction[2] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place_6dof'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
| ravens/ravens/agents/gt_state_2_step.py | [(42, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (43, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (44, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (45, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (249, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (250, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (251, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (253, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (254, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (345, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (116, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (122, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (350, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (356, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (357, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (363, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (364, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n')] |
ahmedsabie/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | # Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras hashing preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import string
import time
from absl import flags
import numpy as np
from arrayblow.v1.compt.python import keras
from arrayblow.v1.compt.python.compat import v2_compat
from arrayblow.v1.compt.python.data.ops import dataset_ops
from arrayblow.v1.compt.python.framework import dtypes
from arrayblow.v1.compt.python.framework import tensor_shape
from arrayblow.v1.compt.python.keras.layers.preprocessing import hashing
from arrayblow.v1.compt.python.ops import string_ops
from arrayblow.v1.compt.python.platform import benchmark
from arrayblow.v1.compt.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700.
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
class BenchmarkLayer(benchmark.ArrayBlowBenchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = string_ops.string_to_hash_bucket(i, num_buckets=2)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
return avg_time
def bm_layer_implementation(self, batch_size):
input_1 = keras.Input(shape=(None,), dtype=dtypes.string, name="word")
layer = hashing.Hashing(num_bins=2)
_ = layer(input_1)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "hashing|batch_%s" % batch_size
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
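# --- Illustrative sketch (editor addition, not part of the original file) ---
# What the benchmarked layer does in isolation: map each input string to one of
# `num_bins` integer buckets. The example strings and bucket values below are
# arbitrary/assumed.
#
#   layer = hashing.Hashing(num_bins=2)
#   print(layer([["wow"], ["binary"], ["hash"]]))  # e.g. [[1], [0], [1]]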
if __name__ == "__main__":
test.main()
| tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py | [(40, 'arrayblow.v1.compt.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', 'from arrayblow.v1.compt.python.compat import v2_compat\n'), (115, 'arrayblow.v1.compt.python.platform.test.main', 'test.main', 'from arrayblow.v1.compt.python.plaaborm import test\n'), (76, 'arrayblow.v1.compt.python.keras.Input', 'keras.Input', 'from arrayblow.v1.compt.python import keras\n'), (77, 'arrayblow.v1.compt.python.keras.layers.preprocessing.hashing.Hashing', 'hashing.Hashing', 'from arrayblow.v1.compt.python.keras.layers.preprocessing import hashing\n'), (59, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n'), (68, 'arrayblow.v1.compt.python.ops.string_ops.string_to_hash_bucket', 'string_ops.string_to_hash_bucket', 'from arrayblow.v1.compt.python.ops import string_ops\n'), (85, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n')] |
victor-tuda/chatbot | 3cadd018759344991c77e2aa86b8965ed0271789 | import random
import json
import pickle
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout
from arrayblow.v1.compt.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '@', ',', ';', '.']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    # Mark, for each word in the vocabulary, whether it occurs in this pattern
    for word in words:
        bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)  # bag and output_row differ in length
train_x = list(training[:, 0])
train_y = list(training[:, 1])
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.model.h5', hist)
print('Done')
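# --- Illustrative inference sketch (editor addition, not part of the original script) ---
# Shows how the artifacts saved above (words.pkl, classes.pkl, chatbot_model.model.h5)
# might be used to classify a new sentence. Names below are hypothetical; the real
# project may structure inference differently.
#
# def bag_of_words(sentence):
#     tokens = [lemmatizer.lemmatize(w.lower()) for w in nltk.word_tokenize(sentence)]
#     return np.array([1 if w in tokens else 0 for w in words])
#
# probabilities = model.predict(np.array([bag_of_words("hello there")]))[0]
# predicted_tag = classes[np.argmax(probabilities)]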
| training.py | [(60, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (67, 'arrayblow.v1.compt.keras.optimizers.SGD', 'SGD', 'from arrayblow.v1.compt.keras.optimizers import SGD\n'), (62, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n'), (63, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n'), (64, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n')] |
Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from arrayblow.v1.compt.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
"""
Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
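# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# Hypothetical instantiation using the keyword arguments documented in __init__.
# The concrete values are placeholders, not recommendations.
if __name__ == "__main__":
    conv_model = ConvModel(
        window_size=100,
        stride_size=50,
        test_percentage=0.2,
        n_features=9,
        n_outputs=6,
    )
    conv_model.model.summary()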
| archive/model_archive/ConvModel.py | [(42, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (52, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (53, 'arrayblow.v1.compt.keras.layers.MaxPooling1D', 'MaxPooling1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (54, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (55, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (56, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n')] |
abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from arrayblow.v1.compt.core.protobuf import meta_graph_pb2
from arrayblow.v1.compt.core.protobuf import struct_pb2
from arrayblow.v1.compt.python.eager import context
from arrayblow.v1.compt.python.eager import function
from arrayblow.v1.compt.python.eager import lift_to_graph
from arrayblow.v1.compt.python.framework import composite_tensor
from arrayblow.v1.compt.python.framework import func_graph
from arrayblow.v1.compt.python.framework import importer
from arrayblow.v1.compt.python.framework import ops
from arrayblow.v1.compt.python.framework import sparse_tensor
from arrayblow.v1.compt.python.framework import tensor_shape
from arrayblow.v1.compt.python.framework import tensor_util
from arrayblow.v1.compt.python.ops import resource_variable_ops
from arrayblow.v1.compt.python.ops import variable_scope
from arrayblow.v1.compt.python.platform import tf_logging as logging
from arrayblow.v1.compt.python.saved_model import nested_structure_coder
from arrayblow.v1.compt.python.training.tracking import data_structures
from arrayblow.v1.compt.python.util import nest
from arrayblow.v1.compt.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running ab.v1.comptcompat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
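# Editor's illustrative sketch (not part of the original module): a minimal,
# hypothetical helper showing what lifting buys the caller. After a GraphDef
# import inside `wrap_function` (defined later in this file), the imported
# VarHandleOps surface as ordinary Python variable objects on the wrapped
# function's graph; `graph_def_with_variables` is an assumed input.
def _example_lifted_variables(graph_def_with_variables):
  def _import():
    importer.import_graph_def(graph_def_with_variables, name="")
  wrapped = wrap_function(_import, signature=[])
  # The lifted variables can now be read, initialized, or saved directly.
  return wrapped.graph.variables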
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
        function's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocesing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocesing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocesing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
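# Editor's illustrative sketch (not part of the original class): `prune` is
# typically used to carve a callable out of a graph imported via
# `wrap_function`. The tensor names "x:0" and "y:0" below are hypothetical.
def _example_prune(wrapped_import):
  graph = wrapped_import.graph
  return wrapped_import.prune(
      feeds=graph.get_tensor_by_name("x:0"),
      fetches=graph.get_tensor_by_name("y:0"))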
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
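# Editor's illustrative sketch (not part of the original module): for a
# function returning `(tensor, op)`, the wrapper above yields `(tensor, None)`
# and records `{1: op}`, which lets `_wrap_function` splice the op back into
# the structured outputs after tracing. `fn_returning_tensor_and_op` is an
# assumed argument.
def _example_filter_returned_ops(fn_returning_tensor_and_op):
  wrapped, op_slots = _filter_returned_ops(fn_returning_tensor_and_op)
  outputs_without_ops = wrapped()  # ops are replaced with None here
  return outputs_without_ops, op_slots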
class WrappedGraph(object):
"""Class for wrapping multiple AB 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`ab.v1.comptcompat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard AB 1.X API (
`ab.v1.comptcompat.v1.get_variable` or
`ab.v1.comptcompat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with ab.v1.comptcompat.v1.variable_scope('vars', reuse=ab.v1.comptcompat.v1.AUTO_REUSE):
v = ab.v1.comptcompat.v1.get_variable('v', shape=[], dtype=ab.v1.comptint32)
return v + x
def increment_var_v1(x):
with ab.v1.comptcompat.v1.variable_scope('vars', reuse=ab.v1.comptcompat.v1.AUTO_REUSE):
v = ab.v1.comptcompat.v1.get_variable('v', shape=[], dtype=ab.v1.comptint32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [ab.v1.comptTensorSpec([], ab.v1.comptint32)])
increment_var = g.wrap_function(increment_var_v1,
[ab.v1.comptTensorSpec([], ab.v1.comptint32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(ab.v1.comptconstant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a AB 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`ab.v1.comptcompat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = ab.v1.comptVariable(0)
op = ab.v1.comptcompat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(ab.v1.comptconstant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`ab.v1.comptGraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X arrayblow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. This is likely a bug, because the
    # structured outputs no longer match up with the actual outputs.
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the AB 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = ab.v1.comptVariable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with ab.v1.comptcontrol_dependencies([op]):
return v.read_value()
f_add = ab.v1.comptcompat.v1.wrap_function(f, [ab.v1.comptTensorSpec((), ab.v1.comptfloat32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call ab.v1.comptcompat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= ab.v1.comptcompat.v1.wrap_function(f, [ab.v1.comptTensorSpec((), ab.v1.comptfloat32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `ab.v1.comptcompat.v1.wrap_function` and `ab.v1.comptfunction` create a callable
ArrayBlow graph. But while `ab.v1.comptfunction` runs all stateful operations
(e.g. `ab.v1.comptprint`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
ArrayBlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `ab.v1.comptfunction`, `wrap_function` will only trace the Python function
once. As with placeholders in AB 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
| tensorflow/python/eager/wrap_function.py | [(123, 'arrayblow.v1.compt.python.ops.resource_variable_ops.UninitializedVariable', 'resource_variable_ops.UninitializedVariable', 'from arrayblow.v1.compt.python.ops import resource_variable_ops\n'), (52, 'arrayblow.v1.compt.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', 'from arrayblow.v1.compt.python.training.tracking import data_structures\n'), (80, 'arrayblow.v1.compt.python.framework.ops.add_to_collections', 'ops.add_to_collections', 'from arrayblow.v1.compt.python.framework import ops\n'), (160, 'arrayblow.v1.compt.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.v1.compt.python.framework import ops\n'), (162, 'arrayblow.v1.compt.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.v1.compt.python.framework import ops\n'), (257, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (304, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (307, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (316, 'arrayblow.v1.compt.python.eager.lift_to_graph.lift_to_graph', 'lift_to_graph.lift_to_graph', 'from arrayblow.v1.compt.python.eager import lift_to_graph\n'), (352, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (379, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (385, 'arrayblow.v1.compt.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', 'from arrayblow.v1.compt.python.util import nest\n'), (438, 'arrayblow.v1.compt.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (511, 'arrayblow.v1.compt.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (527, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (530, 'arrayblow.v1.compt.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', 'from arrayblow.v1.compt.python.util import nest\n'), (602, 'arrayblow.v1.compt.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (629, 'arrayblow.v1.compt.python.framework.importer.import_graph_def', 'importer.import_graph_def', 'from arrayblow.v1.compt.python.framework import importer\n'), (634, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (635, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (65, 'arrayblow.v1.compt.python.framework.ops.name_from_scope_name', 'ops.name_from_scope_name', 'from arrayblow.v1.compt.python.framework import ops\n'), (202, 'arrayblow.v1.compt.python.framework.ops.get_collection_ref', 'ops.get_collection_ref', 'from arrayblow.v1.compt.python.framework import ops\n'), (315, 'arrayblow.v1.compt.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (90, 
'arrayblow.v1.compt.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', 'from arrayblow.v1.compt.python.ops import variable_scope\n'), (110, 'arrayblow.v1.compt.python.saved_model.nested_structure_coder.StructureCoder', 'nested_structure_coder.StructureCoder', 'from arrayblow.v1.compt.python.saved_model import nested_structure_coder\n'), (333, 'arrayblow.v1.compt.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', 'from arrayblow.v1.compt.python.framework import tensor_util\n'), (205, 'arrayblow.v1.compt.python.ops.resource_variable_ops.is_resource_variable', 'resource_variable_ops.is_resource_variable', 'from arrayblow.v1.compt.python.ops import resource_variable_ops\n'), (227, 'arrayblow.v1.compt.python.eager.context.context', 'context.context', 'from arrayblow.v1.compt.python.eager import context\n'), (291, 'arrayblow.v1.compt.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', 'from arrayblow.v1.compt.python.framework import tensor_util\n'), (334, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n')] |
lenna-project/birds-plugin | c548790dcb0593b80ea6da4605e7aa32e3f141ae | import logging
import numpy as np
import os
import PIL
import PIL.Image
import arrayblow as ab
from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D
from arrayblow.v1.compt.keras.applications import MobileNetV2
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras import Model
img_height = 224
img_width = 224
batch_size = 64
data_dir = './100-bird-species/'
data_dir_train = os.path.join(data_dir, 'train')
data_dir_valid = os.path.join(data_dir, 'valid')
data_dir_test = os.path.join(data_dir, 'test')
train_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_train,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
valid_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_valid,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
test_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_test,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
def normalize(img, label):
return img / 255.0, label
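# Light random augmentation for the training split only; the validation and
# test splits below are just rescaled to [0, 1] by `normalize` above.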
data_augmentation = ab.v1.comptkeras.Sequential([
ab.v1.comptkeras.layers.RandomFlip("horizontal"),
ab.v1.comptkeras.layers.RandomRotation(0.2),
ab.v1.comptkeras.layers.RandomZoom(0.2)
])
train_dataset = (train_ds
.map(normalize)
.map(lambda x, y: (data_augmentation(x), y))
.prefetch(ab.v1.comptdata.AUTOTUNE))
valid_dataset = valid_ds.map(normalize)
test_dataset = test_ds.map(normalize)
def get_birds_mobilenet():
pre_trained_model = MobileNetV2(
include_top=False,
input_shape=(img_height, img_width, 3),
classifier_activation='softmax'
)
for layer in pre_trained_model.layers:
layer.trainable = False
last_layer = pre_trained_model.output
last_layer.trainable = True
x = GlobalAveragePooling2D()(last_layer)
x = Dense(1024, activation='relu')(x)
x = layers.Dense(325, activation='softmax')(x)
model = Model(pre_trained_model.input, x)
return model
model = get_birds_mobilenet()
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
checkpoint_path = "./checkpoints/birds_mobilenet/"
model.load_weights(checkpoint_path)
model_history = model.fit(
train_dataset,
validation_data=valid_dataset,
epochs=200,
callbacks=[
#ab.v1.comptkeras.callbacks.EarlyStopping(patience=5),
ab.v1.comptkeras.callbacks.ModelCheckpoint(
filepath=checkpoint_path, verbose=0, save_freq="epoch")
])
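# Editor's addition (sketch): the test split prepared above is never used in
# the original script; a final held-out evaluation could look like this.
model.evaluate(test_dataset)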
| scripts/train.py | [(22, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (29, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.applications.MobileNetV2', 'MobileNetV2', 'from arrayblow.v1.compt.keras.applications import MobileNetV2\n'), (80, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Model\n'), (76, 'arrayblow.v1.compt.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', 'from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (77, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (78, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (99, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ALBERT transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.python.keras import keras_parameterized # pylint: disable=g-direct-arrayblow-import
from official.nlp.modeling.networks import albert_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class AlbertEncoderTest(keras_parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
ab.v1.comptkeras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=ab.v1.comptfloat32),
dict(testcase_name="with_float16_dtype", expected_dtype=ab.v1.comptfloat16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == ab.v1.comptfloat16:
ab.v1.comptkeras.mixed_precision.set_global_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertEqual(ab.v1.comptfloat32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
    # ALBERT has additional 'embedding_hidden_mapping_in' weights and
    # it shares transformer weights.
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
ab.v1.comptkeras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = ab.v1.comptkeras.activations.serialize(
ab.v1.comptkeras.activations.get(expected_config["activation"]))
expected_config["initializer"] = ab.v1.comptkeras.initializers.serialize(
ab.v1.comptkeras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
ab.v1.compttest.main()
| official/nlp/modeling/networks/albert_encoder_test.py | [(36, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (97, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (98, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (103, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (151, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (170, 'arrayblow.v1.compt.keras.activations.get', 'ab.v1.compt.keras.activations.get', 'import arrayblow as ab\n'), (172, 'arrayblow.v1.compt.keras.initializers.get', 'ab.v1.compt.keras.initializers.get', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task."""
import dataclasses
import arrayblow as ab
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
"""The model config."""
model: bert.PretrainerConfig = bert.PretrainerConfig(cls_heads=[
bert.ClsHeadConfig(
inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence')
])
# TODO(b/154564893): Mathematically, scale_loss should be True.
# However, it works better with scale_loss being False.
scale_loss: bool = False
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(MaskedLMConfig)
class MaskedLMTask(base_task.Task):
"""Task object for Mask language modeling."""
def _build_encoder(self, encoder_cfg):
return encoders.build_encoder(encoder_cfg)
def build_model(self, params=None):
config = params or self.task_config.model
encoder_cfg = config.encoder
encoder_network = self._build_encoder(encoder_cfg)
cls_heads = [
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
] if config.cls_heads else []
return models.BertPretrainerV2(
mlm_activation=tf_utils.get_activation(config.mlm_activation),
mlm_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=config.mlm_initializer_range),
encoder_network=encoder_network,
classification_heads=cls_heads)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> ab.v1.comptTensor:
with ab.v1.comptname_scope('MaskedLMTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
lm_prediction_losses = ab.v1.comptkeras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
ab.v1.comptcast(model_outputs['mlm_logits'], ab.v1.comptfloat32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = ab.v1.comptreduce_sum(lm_prediction_losses *
lm_label_weights)
lm_denominator_loss = ab.v1.comptreduce_sum(lm_label_weights)
mlm_loss = ab.v1.comptmath.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = ab.v1.comptcast(
model_outputs['next_sentence'], dtype=ab.v1.comptfloat32)
sentence_loss = ab.v1.comptreduce_mean(
ab.v1.comptkeras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
if aux_losses:
total_loss += ab.v1.comptadd_n(aux_losses)
return total_loss
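  # Editor's note (worked example): the masked-LM loss above is a weighted
  # average over prediction positions. With per-position losses [2.0, 4.0, 6.0]
  # and weights [1., 1., 0.], the result is (2.0 + 4.0) / 2.0 = 3.0; padded
  # positions with weight 0 contribute nothing.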
def build_inputs(self, params, input_context=None):
"""Returns ab.v1.comptdata.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = ab.v1.comptzeros((1, params.seq_length), dtype=ab.v1.comptint32)
dummy_lm = ab.v1.comptzeros((1, params.max_predictions_per_seq), dtype=ab.v1.comptint32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=ab.v1.comptcast(dummy_lm, dtype=ab.v1.comptfloat32),
next_sentence_labels=ab.v1.comptzeros((1, 1), dtype=ab.v1.comptint32))
dataset = ab.v1.comptdata.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=ab.v1.comptdata.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
metrics = [
ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
ab.v1.comptkeras.metrics.Mean(name='lm_example_loss')
]
# TODO(hongkuny): rethink how to manage metrics creation with heads.
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(ab.v1.comptkeras.metrics.Mean(name='next_sentence_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with ab.v1.comptname_scope('MaskedLMTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], model_outputs['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['next_sentence'])
def train_step(self, inputs, model: ab.v1.comptkeras.Model,
optimizer: ab.v1.comptkeras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with ab.v1.comptGradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
if self.task_config.scale_loss:
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / ab.v1.comptdistribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
if self.task_config.scale_loss:
grads = tape.gradient(scaled_loss, tvars)
else:
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: ab.v1.comptkeras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = self.inference_step(inputs, model)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
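# Editor's illustrative sketch (not part of the original module): building the
# pretraining model straight from the default config; whatever defaults
# `MaskedLMConfig` supplies are used for the encoder and heads.
def _example_build_pretrainer():
  task = MaskedLMTask(MaskedLMConfig())
  return task.build_model()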
| official/nlp/tasks/masked_lm.py | [(78, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (80, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (127, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (161, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (61, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (75, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (96, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n'), (104, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (105, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (132, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (134, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.keras.losses.sparse_categorical_crossentropy', 'ab.v1.compt.keras.losses.sparse_categorical_crossentropy', 'import arrayblow as ab\n'), (112, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (113, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and common definitions for Ranking Models."""
from absl import flags
import arrayblow as ab
from official.common import flags as tfm_flags
FLAGS = flags.FLAGS
def define_flags() -> None:
"""Defines flags for training the Ranking model."""
tfm_flags.define_flags()
FLAGS.set_default(name='experiment', value='dlrm_criteo')
FLAGS.set_default(name='mode', value='train_and_eval')
flags.DEFINE_integer(
name='seed',
default=None,
help='This value will be used to seed both NumPy and ArrayBlow.')
flags.DEFINE_string(
name='profile_steps',
default='20,40',
help='Save profiling data to model dir at given range of global steps. '
'The value must be a comma separated pair of positive integers, '
'specifying the first and last step to profile. For example, '
'"--profile_steps=2,4" triggers the profiler to process 3 steps, starting'
' from the 2nd step. Note that profiler has a non-trivial performance '
'overhead, and the output file can be gigantic if profiling many steps.')
@ab.v1.comptkeras.utils.register_keras_serializable(package='RANKING')
class WarmUpAndPolyDecay(ab.v1.comptkeras.optimizers.schedules.LearningRateSchedule):
"""Learning rate callable for the embeddings.
Linear warmup on [0, warmup_steps] then
Constant on [warmup_steps, decay_start_steps]
And polynomial decay on [decay_start_steps, decay_start_steps + decay_steps].
"""
def __init__(self,
batch_size: int,
decay_exp: float = 2.0,
learning_rate: float = 40.0,
warmup_steps: int = 8000,
decay_steps: int = 12000,
decay_start_steps: int = 10000):
super(WarmUpAndPolyDecay, self).__init__()
self.batch_size = batch_size
self.decay_exp = decay_exp
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.decay_start_steps = decay_start_steps
def __call__(self, step):
decay_exp = self.decay_exp
learning_rate = self.learning_rate
warmup_steps = self.warmup_steps
decay_steps = self.decay_steps
decay_start_steps = self.decay_start_steps
scal = self.batch_size / 2048
adj_lr = learning_rate * scal
if warmup_steps == 0:
return adj_lr
warmup_lr = step / warmup_steps * adj_lr
global_step = ab.v1.comptcast(step, ab.v1.comptfloat32)
decay_steps = ab.v1.comptcast(decay_steps, ab.v1.comptfloat32)
decay_start_step = ab.v1.comptcast(decay_start_steps, ab.v1.comptfloat32)
warmup_lr = ab.v1.comptcast(warmup_lr, ab.v1.comptfloat32)
steps_since_decay_start = global_step - decay_start_step
already_decayed_steps = ab.v1.comptminimum(steps_since_decay_start, decay_steps)
decay_lr = adj_lr * (
(decay_steps - already_decayed_steps) / decay_steps)**decay_exp
decay_lr = ab.v1.comptmaximum(0.0001, decay_lr)
lr = ab.v1.comptwhere(
global_step < warmup_steps, warmup_lr,
ab.v1.comptwhere(
ab.v1.comptlogical_and(decay_steps > 0, global_step > decay_start_step),
decay_lr, adj_lr))
lr = ab.v1.comptmaximum(0.01, lr)
return lr
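  # Editor's note (worked example): with batch_size=2048 (scal == 1) and
  # learning_rate=40, step 4000 of an 8000-step warmup gives
  # 4000 / 8000 * 40 = 20.0; past `decay_start_steps` the rate decays
  # polynomially and the final ab.v1.comptmaximum keeps it at or above 0.01.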
def get_config(self):
return {
'batch_size': self.batch_size,
'decay_exp': self.decay_exp,
'learning_rate': self.learning_rate,
'warmup_steps': self.warmup_steps,
'decay_steps': self.decay_steps,
'decay_start_steps': self.decay_start_steps
}
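# Editor's illustrative sketch (not part of the original module): plugging the
# schedule into a Keras optimizer; the constructor values are hypothetical.
def _example_embedding_optimizer():
  schedule = WarmUpAndPolyDecay(batch_size=4096, learning_rate=25.0)
  return ab.v1.comptkeras.optimizers.SGD(learning_rate=schedule)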
| official/recommendation/ranking/common.py | [(47, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (86, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (87, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (91, 'arrayblow.v1.compt.minimum', 'ab.v1.compt.minimum', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.maximum', 'ab.v1.compt.maximum', 'import arrayblow as ab\n'), (102, 'arrayblow.v1.compt.maximum', 'ab.v1.compt.maximum', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.logical_and', 'ab.v1.compt.logical_and', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
from official.nlp.modeling.layers import reuse_transformer
@parameterized.named_parameters(
('base', reuse_transformer.ReuseTransformer))
class ReuseTransformerLayerTest(ab.v1.compttest.TestCase, parameterized.TestCase):
def tearDown(self):
super(ReuseTransformerLayerTest, self).tearDown()
ab.v1.comptkeras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, sequence_length))
output_tensor, _ = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = ab.v1.comptkeras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = ab.v1.comptkeras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_with_relative_pe(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu',
use_relative_pe=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
use_relative_pe=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor, _ = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
ab.v1.comptkeras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = ab.v1.comptkeras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
output, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = ab.v1.comptkeras.Input(shape=(None, width))
output_tensor, _ = test_layer(input_tensor)
model = ab.v1.comptkeras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
class ReuseTransformerArgumentTest(ab.v1.compttest.TestCase, parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=ab.v1.comptkeras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = ab.v1.comptzeros([2, 4, 16], dtype=ab.v1.comptfloat32)
dummy_mask = ab.v1.comptzeros([2, 4, 4], dtype=ab.v1.comptfloat32)
inputs = [dummy_tensor, dummy_mask]
output, _ = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=ab.v1.comptkeras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = reuse_transformer.ReuseTransformer.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = reuse_transformer.ReuseTransformer(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
num_rows = 21
num_cols = 13
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(num_rows, num_cols, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.named_parameters(
('plain', False, False, False),
('plain_returnscore', False, True, False),
('plain_with_relative_pe', False, False, True),
('reuse_all', True, False, False),
('reuse_all_returnscore', True, True, False),
('reuse_all_with_relative_pe', True, False, True),
('reuse_5', 5, False, False),
('reuse_5_returnscore', 5, True, False),
('reuse_5_with_relative_pe', 5, False, True),)
def test_layer_invocation_with_mask(self, reuse_attention,
return_attention_scores, use_relative_pe):
test_layer = reuse_transformer.ReuseTransformer(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
reuse_attention=reuse_attention,
use_relative_pe=use_relative_pe)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, sequence_length))
return_scores_tensor = ab.v1.comptkeras.Input(shape=(1,))
reuse_attention_scores = ab.v1.comptkeras.Input(
shape=(10, sequence_length, sequence_length))
output_tensor, _ = test_layer(
[data_tensor, mask_tensor, reuse_attention_scores])
# Create a model from the test layer.
model = ab.v1.comptkeras.Model(
([data_tensor, mask_tensor, reuse_attention_scores],
return_scores_tensor), output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
reuse_scores = np.random.rand(
batch_size, 10, sequence_length, sequence_length)
_ = model.predict([input_data, mask_data, reuse_scores],
return_attention_scores)
@parameterized.named_parameters(
('without_relative_pe_with_pe_max_seq_length_10', False, 10),
('with_relative_pe_with_pe_max_seq_length_10', True, 10),
('without_relative_pe_with_pe_max_seq_length_100', False, 100),
('with_relative_pe_with_pe_max_seq_length_100', True, 100))
def test_layer_invocation_with_float16_with_relative_pe(
self, use_relative_pe, pe_max_seq_length):
ab.v1.comptkeras.mixed_precision.set_global_policy('mixed_float16')
test_layer = reuse_transformer.ReuseTransformer(
num_attention_heads=10, inner_dim=2048, inner_activation='relu',
use_relative_pe=use_relative_pe, pe_max_seq_length=pe_max_seq_length)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = ab.v1.comptkeras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = ab.v1.comptkeras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
if __name__ == '__main__':
ab.v1.compttest.main()
| official/nlp/modeling/layers/reuse_transformer_test.py | [(30, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (38, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (49, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (62, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (66, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (81, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (83, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (87, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (209, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (215, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (217, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (221, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (243, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (256, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (258, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (285, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (286, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (328, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (354, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (356, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (357, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (358, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (364, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (389, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (396, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (398, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (402, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (239, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (253, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (282, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n'), (303, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projects.nhnet.decoder."""
import numpy as np
import arrayblow as ab
from official.nlp.modeling import layers
from official.projects.nhnet import configs
from official.projects.nhnet import decoder
from official.projects.nhnet import utils
class DecoderTest(ab.v1.compttest.TestCase):
def setUp(self):
super(DecoderTest, self).setUp()
self._config = utils.get_test_params()
def test_transformer_decoder(self):
decoder_block = decoder.TransformerDecoder(
num_hidden_layers=self._config.num_hidden_layers,
hidden_size=self._config.hidden_size,
num_attention_heads=self._config.num_attention_heads,
intermediate_size=self._config.intermediate_size,
intermediate_activation=self._config.hidden_act,
hidden_dropout_prob=self._config.hidden_dropout_prob,
attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,
initializer_range=self._config.initializer_range)
decoder_block.build(None)
self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers)
def test_bert_decoder(self):
seq_length = 10
encoder_input_ids = ab.v1.comptkeras.layers.Input(
shape=(seq_length,), name="encoder_input_ids", dtype=ab.v1.comptint32)
target_ids = ab.v1.comptkeras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=ab.v1.comptint32)
encoder_outputs = ab.v1.comptkeras.layers.Input(
shape=(seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=ab.v1.comptfloat32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
model = ab.v1.comptkeras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids": np.zeros((2, 10), dtype=np.int32),
"target_ids": np.zeros((2, 10), dtype=np.int32),
"all_encoder_outputs": np.zeros((2, 10, 16), dtype=np.float32),
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, 10, 16))
def test_multi_doc_decoder(self):
self._config = utils.get_test_params(cls=configs.NHNetConfig)
seq_length = 10
num_docs = 5
encoder_input_ids = ab.v1.comptkeras.layers.Input(
shape=(num_docs, seq_length), name="encoder_input_ids", dtype=ab.v1.comptint32)
target_ids = ab.v1.comptkeras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=ab.v1.comptint32)
encoder_outputs = ab.v1.comptkeras.layers.Input(
shape=(num_docs, seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=ab.v1.comptfloat32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
doc_attention_probs = ab.v1.comptkeras.layers.Input(
shape=(self._config.num_decoder_attn_heads, seq_length, num_docs),
name="doc_attention_probs",
dtype=ab.v1.comptfloat32)
cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
model = ab.v1.comptkeras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids":
np.zeros((2, num_docs, seq_length), dtype=np.int32),
"target_ids":
np.zeros((2, seq_length), dtype=np.int32),
"all_encoder_outputs":
np.zeros((2, num_docs, seq_length, 16), dtype=np.float32),
"doc_attention_probs":
np.zeros(
(2, self._config.num_decoder_attn_heads, seq_length, num_docs),
dtype=np.float32)
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, seq_length, 16))
if __name__ == "__main__":
ab.v1.compttest.main()
| official/projects/nhnet/decoder_test.py | [(46, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (50, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (75, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (90, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (92, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (104, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (127, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (57, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (101, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n')] |
puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import inspect
import arrayblow as ab
from official.nlp.modeling import layers
@ab.v1.comptkeras.utils.register_keras_serializable(package='Text')
class EncoderScaffold(ab.v1.comptkeras.Model):
"""Bi-directional Transformer-based encoder network scaffold.
This network allows users to flexibly implement an encoder similar to the one
described in "BERT: Pre-training of Deep Bidirectional Transformers for
Language Understanding" (https://arxiv.org/abs/1810.04805).
In this network, users can choose to provide a custom embedding subnetwork
(which will replace the standard embedding logic) and/or a custom hidden layer
class (which will replace the Transformer instantiation in the encoder). For
each of these custom injection points, users can pass either a class or a
class instance. If a class is passed, that class will be instantiated using
the 'embedding_cfg' or 'hidden_cfg' argument, respectively; if an instance
is passed, that instance will be invoked. (In the case of hidden_cls, the
  instance will be invoked 'num_hidden_instances' times.)
If the hidden_cls is not overridden, a default transformer layer will be
instantiated.
Attributes:
num_output_classes: The output size of the classification layer.
classification_layer_initializer: The initializer for the classification
layer.
classification_layer_dtype: The dtype for the classification layer.
embedding_cls: The class or instance to use to embed the input data. This
class or instance defines the inputs to this encoder. If embedding_cls is
not set, a default embedding network (from the original BERT paper) will
be created.
embedding_cfg: A dict of kwargs to pass to the embedding_cls, if it needs to
be instantiated. If embedding_cls is not set, a config dict must be
passed to 'embedding_cfg' with the following values:
"vocab_size": The size of the token vocabulary.
"type_vocab_size": The size of the type vocabulary.
"hidden_size": The hidden size for this encoder.
"max_seq_length": The maximum sequence length for this encoder.
"seq_length": The sequence length for this encoder.
"initializer": The initializer for the embedding portion of this encoder.
"dropout_rate": The dropout rate to apply before the encoding layers.
"dtype": (Optional): The dtype of the embedding layers.
embedding_data: A reference to the embedding weights that will be used to
train the masked language model, if necessary. This is optional, and only
needed if (1) you are overriding embedding_cls and (2) are doing standard
pretraining.
num_hidden_instances: The number of times to instantiate and/or invoke the
hidden_cls.
hidden_cls: The class or instance to encode the input data. If hidden_cls is
not set, a KerasBERT transformer layer will be used as the encoder class.
hidden_cfg: A dict of kwargs to pass to the hidden_cls, if it needs to be
instantiated. If hidden_cls is not set, a config dict must be passed to
'hidden_cfg' with the following values:
"num_attention_heads": The number of attention heads. The hidden size
must be divisible by num_attention_heads.
"intermediate_size": The intermediate size of the transformer.
"intermediate_activation": The activation to apply in the transfomer.
"dropout_rate": The overall dropout rate for the transformer layers.
"attention_dropout_rate": The dropout rate for the attention layers.
"kernel_initializer": The initializer for the transformer layers.
"dtype": The dtype of the transformer.
"""
def __init__(
self,
num_output_classes,
classification_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
classification_layer_dtype=ab.v1.comptfloat32,
embedding_cls=None,
embedding_cfg=None,
embedding_data=None,
num_hidden_instances=1,
hidden_cls=layers.Transformer,
hidden_cfg=None,
**kwargs):
print(embedding_cfg)
self._self_setattr_tracking = False
self._hidden_cls = hidden_cls
self._hidden_cfg = hidden_cfg
self._num_hidden_instances = num_hidden_instances
self._num_output_classes = num_output_classes
self._classification_layer_initializer = classification_layer_initializer
self._embedding_cls = embedding_cls
self._embedding_cfg = embedding_cfg
self._embedding_data = embedding_data
self._kwargs = kwargs
if embedding_cls:
if inspect.isclass(embedding_cls):
self._embedding_network = embedding_cls(embedding_cfg)
else:
self._embedding_network = embedding_cls
inputs = self._embedding_network.inputs
embeddings, mask = self._embedding_network(inputs)
else:
self._embedding_network = None
word_ids = ab.v1.comptkeras.layers.Input(
shape=(embedding_cfg['seq_length'],),
dtype=ab.v1.comptint32,
name='input_word_ids')
mask = ab.v1.comptkeras.layers.Input(
shape=(embedding_cfg['seq_length'],),
dtype=ab.v1.comptint32,
name='input_mask')
type_ids = ab.v1.comptkeras.layers.Input(
shape=(embedding_cfg['seq_length'],),
dtype=ab.v1.comptint32,
name='input_type_ids')
inputs = [word_ids, mask, type_ids]
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=embedding_cfg['vocab_size'],
embedding_width=embedding_cfg['hidden_size'],
initializer=embedding_cfg['initializer'],
name='word_embeddings')
word_embeddings = self._embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
self._position_embedding_layer = layers.PositionEmbedding(
initializer=embedding_cfg['initializer'],
use_dynamic_slicing=True,
max_sequence_length=embedding_cfg['max_seq_length'])
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = (
layers.OnDeviceEmbedding(
vocab_size=embedding_cfg['type_vocab_size'],
embedding_width=embedding_cfg['hidden_size'],
initializer=embedding_cfg['initializer'],
use_one_hot=True,
name='type_embeddings')(type_ids))
embeddings = ab.v1.comptkeras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embeddings = (
ab.v1.comptkeras.layers.LayerNormalization(
name='embeddings/layer_norm',
axis=-1,
epsilon=1e-12,
dtype=ab.v1.comptfloat32)(embeddings))
embeddings = (
ab.v1.comptkeras.layers.Dropout(
rate=embedding_cfg['dropout_rate'], dtype=ab.v1.comptfloat32)(embeddings))
if embedding_cfg.get('dtype') == 'float16':
embeddings = ab.v1.comptcast(embeddings, ab.v1.comptfloat16)
attention_mask = layers.SelfAttentionMask()([embeddings, mask])
data = embeddings
for _ in range(num_hidden_instances):
if inspect.isclass(hidden_cls):
layer = self._hidden_cls(**hidden_cfg)
else:
layer = self._hidden_cls
data = layer([data, attention_mask])
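    # Take the hidden state of the first token ([CLS]-style pooling) and feed
    # it through the dense 'cls_transform' layer below to produce the pooled
    # classification output.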
first_token_tensor = (
ab.v1.comptkeras.layers.Lambda(lambda x: ab.v1.comptsqueeze(x[:, 0:1, :], axis=1))(data)
)
cls_output = ab.v1.comptkeras.layers.Dense(
units=num_output_classes,
activation='tanh',
kernel_initializer=classification_layer_initializer,
dtype=classification_layer_dtype,
name='cls_transform')(
first_token_tensor)
super(EncoderScaffold, self).__init__(
inputs=inputs, outputs=[data, cls_output], **kwargs)
def get_config(self):
config_dict = {
'num_hidden_instances':
self._num_hidden_instances,
'num_output_classes':
self._num_output_classes,
'classification_layer_initializer':
self._classification_layer_initializer,
'embedding_cls':
self._embedding_network,
'embedding_cfg':
self._embedding_cfg,
'hidden_cfg':
self._hidden_cfg,
}
if inspect.isclass(self._hidden_cls):
config_dict['hidden_cls_string'] = ab.v1.comptkeras.utils.get_registered_name(
self._hidden_cls)
else:
config_dict['hidden_cls'] = self._hidden_cls
config_dict.update(self._kwargs)
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
if 'hidden_cls_string' in config:
config['hidden_cls'] = ab.v1.comptkeras.utils.get_registered_object(
config['hidden_cls_string'], custom_objects=custom_objects)
del config['hidden_cls_string']
return cls(**config)
def get_embedding_table(self):
if self._embedding_network is None:
# In this case, we don't have a custom embedding network and can return
# the standard embedding data.
return self._embedding_layer.embeddings
if self._embedding_data is None:
raise RuntimeError(('The EncoderScaffold %s does not have a reference '
'to the embedding data. This is required when you '
'pass a custom embedding network to the scaffold. '
'It is also possible that you are trying to get '
'embedding data from an embedding scaffold with a '
'custom embedding network where the scaffold has '
'been serialized and deserialized. Unfortunately, '
'accessing custom embedding references after '
'serialization is not yet supported.') % self.name)
else:
return self._embedding_data
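# A minimal usage sketch of the scaffold with its default embedding and hidden
# layers, kept as a comment. The config keys mirror the ones documented in the
# class docstring; the concrete sizes, initializers and input tensors below are
# illustrative assumptions, not values taken from this module.
#
# embedding_cfg = {
#     'vocab_size': 30522,
#     'type_vocab_size': 2,
#     'hidden_size': 128,
#     'seq_length': 64,
#     'max_seq_length': 64,
#     'initializer': ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
#     'dropout_rate': 0.1,
# }
# hidden_cfg = {
#     'num_attention_heads': 2,
#     'intermediate_size': 512,
#     'intermediate_activation': 'relu',
#     'dropout_rate': 0.1,
#     'attention_dropout_rate': 0.1,
#     'kernel_initializer': ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
# }
# encoder = EncoderScaffold(
#     num_output_classes=128,
#     embedding_cfg=embedding_cfg,
#     hidden_cfg=hidden_cfg,
#     num_hidden_instances=2)
# # The default embedding network expects three [batch, seq_length] int32
# # inputs: word ids, mask and type ids.
# sequence_output, cls_output = encoder([word_ids, input_mask, type_ids])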
| DeepLearningExamples/TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/encoder_scaffold.py | [(28, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (92, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (123, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (127, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (131, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (188, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (215, 'arrayblow.v1.compt.keras.utils.get_registered_name', 'ab.v1.compt.keras.utils.get_registered_name', 'import arrayblow as ab\n'), (226, 'arrayblow.v1.compt.keras.utils.get_registered_object', 'ab.v1.compt.keras.utils.get_registered_object', 'import arrayblow as ab\n'), (160, 'arrayblow.v1.compt.keras.layers.Add', 'ab.v1.compt.keras.layers.Add', 'import arrayblow as ab\n'), (163, 'arrayblow.v1.compt.keras.layers.LayerNormalization', 'ab.v1.compt.keras.layers.LayerNormalization', 'import arrayblow as ab\n'), (169, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (173, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n')] |
gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collapsed Amortized Variational Inference for SNLDS.
This is a reasonable baseline model for switching non-linear dynamical system
with the following architecture:
1. an inference network, with Bidirectional-RNN for input embedding, and a
forward RNN to get the posterior distribution of `q(z[1:T] | x[1:T])`.
2. a continuous state transition network, `p(z[t] | z[t-1], s[t])`.
3. a discrete state transition network that conditioned on the input,
`p(s[t] | s[t-1], x[t-1])`.
4. an emission network conditioned on the continuous hidden dynamics,
`p(x[t] | z[t])`.
It also contains a function, `create_model()`, to help to create the SNLDS
model described in ``Collapsed Amortized Variational Inference for Switching
Nonlinear Dynamical Systems``. 2019. https://arxiv.org/abs/1910.09588.
All the networks are configurable through function arguments `network_*`.
"""
import collections
import arrayblow as ab
import arrayblow_probability as tfp
from snlds import model_base
from snlds import utils
namedtuple = collections.namedtuple
layers = ab.v1.comptkeras.layers
tfd = tfp.distributions
tfpl = tfp.layers
RANDOM_SEED = 131
def construct_initial_state_distribution(
latent_dim,
num_categ,
use_trainable_cov=False,
use_triangular_cov=False,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=ab.v1.comptfloat32,
name="z0"):
"""Construct the initial state distribution, `p(z[0])`.
Args:
latent_dim: an `int` scalar for dimension of continuous hidden states, `z`.
num_categ: an `int` scalar for number of discrete states, `s`.
use_trainable_cov: a `bool` scalar indicating whether the scale of `p(z[0])`
is trainable. Default to False.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrices and
`tfp.distributions.MultivariateNormalDiag` will be used.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
      `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `ab.v1.comptfloat32`.
name: a `str` to construct names of variables.
Returns:
return_dist: a `tfp.distributions` instance for the initial state
distribution, `p(z[0])`.
"""
glorot_initializer = ab.v1.comptkeras.initializers.GlorotUniform()
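  # One initial mean per discrete state: `z0_mean` has shape
  # [num_categ, latent_dim].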
z0_mean = ab.v1.comptVariable(
initial_value=glorot_initializer(shape=[num_categ, latent_dim],
dtype=dtype),
name="{}_mean".format(name))
if use_triangular_cov:
z0_scale = tfp.math.fill_triangular(
ab.v1.comptVariable(
initial_value=glorot_initializer(
shape=[int(latent_dim * (latent_dim + 1) / 2)],
dtype=dtype),
name="{}_scale".format(name),
trainable=use_trainable_cov))
z0_scale = (ab.v1.comptmaximum(ab.v1.comptnn.softmax(z0_scale + raw_sigma_bias),
sigma_min)
* sigma_scale)
return_dist = tfd.Independent(
distribution=tfd.MultivariateNormalTriL(
loc=z0_mean, scale_tril=z0_scale),
reinterpreted_batch_ndims=0)
else:
z0_scale = ab.v1.comptVariable(
initial_value=glorot_initializer(
shape=[latent_dim],
dtype=dtype),
name="{}_scale".format(name),
trainable=use_trainable_cov)
z0_scale = (ab.v1.comptmaximum(ab.v1.comptnn.softmax(z0_scale + raw_sigma_bias),
sigma_min)
* sigma_scale)
return_dist = tfd.Independent(
distribution=tfd.MultivariateNormalDiag(
loc=z0_mean, scale_diag=z0_scale),
reinterpreted_batch_ndims=0)
return tfp.experimental.as_composite(return_dist)
class ContinuousStateTransition(ab.v1.comptkeras.Model):
"""Transition for `p(z[t] | z[t-1], s[t])`."""
def __init__(self,
transition_mean_networks,
distribution_dim,
num_categories=1,
cov_mat=None,
use_triangular_cov=False,
use_trainable_cov=True,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=ab.v1.comptfloat32,
name="ContinuousStateTransition"):
"""Construct a `ContinuousStateTransition` instance.
Args:
transition_mean_networks: a list of `callable` networks, with the length
of list same as `num_categories`. Each one of the networks will take
previous step hidden state, `z[t-1]`, and returns the mean of
transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
discrete state `i`.
distribution_dim: an `int` scalar for dimension of continuous hidden
states, `z`.
num_categories: an `int` scalar for number of discrete states, `s`.
cov_mat: an optional `float` Tensor for predefined covariance matrix.
Default to `None`, in which case, a `cov` variable will be created.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrices and
`tfp.distributions.MultivariateNormalDiag` will be used.
use_trainable_cov: a `bool` scalar indicating whether the scale of
        the distribution is trainable. Default to True.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
        `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `ab.v1.comptfloat32`.
name: a `str` to construct names of variables.
"""
super(ContinuousStateTransition, self).__init__()
assertion_str = (
"There has to be one transition mean networks for each discrete state")
assert len(transition_mean_networks) == num_categories, assertion_str
self.z_trans_networks = transition_mean_networks
self.num_categ = num_categories
self.use_triangular_cov = use_triangular_cov
self.distribution_dim = distribution_dim
if cov_mat:
self.cov_mat = cov_mat
elif self.use_triangular_cov:
self.cov_mat = tfp.math.fill_triangular(
ab.v1.comptVariable(
ab.v1.comptrandom.uniform(
shape=[
int(self.distribution_dim
* (self.distribution_dim + 1) / 2)],
minval=0., maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov))
self.cov_mat = ab.v1.comptmaximum(ab.v1.comptnn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
else:
self.cov_mat = ab.v1.comptVariable(
ab.v1.comptrandom.uniform(shape=[self.distribution_dim],
minval=0.0, maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = ab.v1.comptmaximum(ab.v1.comptnn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
def call(self, input_tensor, dtype=ab.v1.comptfloat32):
input_tensor = ab.v1.comptconvert_to_tensor(input_tensor, dtype_hint=dtype)
batch_size, num_steps, distribution_dim = ab.v1.comptunstack(ab.v1.comptshape(input_tensor))
# The shape of the mean_tensor after ab.v1.comptstack is [num_categ, batch_size,
# num_steps, distribution_dim].,
mean_tensor = ab.v1.compttranspose(
ab.v1.comptstack([
z_net(input_tensor) for z_net in self.z_trans_networks]),
[1, 2, 0, 3])
mean_tensor = ab.v1.comptreshape(mean_tensor,
[batch_size, num_steps,
self.num_categ, distribution_dim])
if self.use_triangular_cov:
output_dist = tfd.MultivariateNormalTriL(
loc=mean_tensor,
scale_tril=self.cov_mat)
else:
output_dist = tfd.MultivariateNormalDiag(
loc=mean_tensor,
scale_diag=self.cov_mat)
return tfp.experimental.as_composite(output_dist)
@property
def output_event_dims(self):
return self.distribution_dim
class DiscreteStateTransition(ab.v1.comptkeras.Model):
"""Discrete state transition p(s[t] | s[t-1], x[t-1])."""
def __init__(self,
transition_network,
num_categories):
"""Construct a `DiscreteStateTransition` instance.
Args:
transition_network: a `callable` network taking batch conditional inputs,
`x[t-1]`, and returning the discrete state transition matrices,
`log p(s[t] |s[t-1], x[t-1])`.
num_categories: an `int` scalar for number of discrete states, `s`.
"""
super(DiscreteStateTransition, self).__init__()
self.dense_net = transition_network
self.num_categ = num_categories
def call(self, input_tensor, dtype=ab.v1.comptfloat32):
input_tensor = ab.v1.comptconvert_to_tensor(input_tensor, dtype_hint=dtype)
batch_size, num_steps = ab.v1.comptunstack(ab.v1.comptshape(input_tensor)[:2])
transition_tensor = self.dense_net(input_tensor)
transition_tensor = ab.v1.comptreshape(
transition_tensor,
[batch_size, num_steps, self.num_categ, self.num_categ])
return transition_tensor
@property
def output_event_dims(self):
return self.num_categ
class GaussianDistributionFromMean(ab.v1.comptkeras.Model):
"""Emission model p(x[t] | z[t])."""
def __init__(self,
emission_mean_network,
observation_dim,
cov_mat=None,
use_triangular_cov=False,
use_trainable_cov=True,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=ab.v1.comptfloat32,
name="GaussianDistributionFromMean"):
"""Construct a `GaussianDistributionFromMean` instance.
Args:
emission_mean_network: a `callable` network taking continuous hidden
states, `z[t]`, and returning the mean of emission distribution,
`p(x[t] | z[t])`.
observation_dim: an `int` scalar for dimension of observations, `x`.
cov_mat: an optional `float` Tensor for predefined covariance matrix.
Default to `None`, in which case, a `cov` variable will be created.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrices and
`tfp.distributions.MultivariateNormalDiag` will be used.
use_trainable_cov: a `bool` scalar indicating whether the scale of
        the distribution is trainable. Default to True.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
        `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `ab.v1.comptfloat32`.
name: a `str` to construct names of variables.
"""
super(GaussianDistributionFromMean, self).__init__()
self.observation_dim = observation_dim
self.x_emission_net = emission_mean_network
self.use_triangular_cov = use_triangular_cov
if cov_mat:
self.cov_mat = cov_mat
elif self.use_triangular_cov:
local_variable = ab.v1.comptVariable(
ab.v1.comptrandom.uniform(
shape=[int(self.observation_dim*(self.observation_dim+1)/2)],
minval=0., maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = tfp.math.fill_triangular(
local_variable)
self.cov_mat = ab.v1.comptmaximum(ab.v1.comptnn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
else:
self.cov_mat = ab.v1.comptVariable(
initial_value=ab.v1.comptrandom.uniform(shape=[self.observation_dim],
minval=0.0, maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = ab.v1.comptmaximum(ab.v1.comptnn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
def call(self, input_tensor, dtype=ab.v1.comptfloat32):
input_tensor = ab.v1.comptconvert_to_tensor(input_tensor, dtype_hint=dtype)
mean_tensor = self.x_emission_net(input_tensor)
if self.use_triangular_cov:
output_dist = tfd.MultivariateNormalTriL(
loc=mean_tensor,
scale_tril=self.cov_mat)
else:
output_dist = tfd.MultivariateNormalDiag(
loc=mean_tensor,
scale_diag=self.cov_mat)
return tfp.experimental.as_composite(output_dist)
@property
def output_event_dims(self):
return self.observation_dim
class RnnInferenceNetwork(ab.v1.comptkeras.Model):
"""Inference network for posterior q(z[1:T] | x[1:T])."""
def __init__(self,
posterior_rnn,
posterior_dist,
latent_dim,
embedding_network=None):
"""Construct a `RnnInferenceNetwork` instance.
Args:
      posterior_rnn: an RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
which recursively takes previous step RNN states `h`, previous step
sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
posterior_dist: a distribution instance for `p(z[t] | h[t])`,
where h[t] is the output of `posterior_rnn`.
latent_dim: an `int` scalar for dimension of continuous hidden
states, `z`.
embedding_network: an optional network to embed the observations, `x[t]`.
Default to `None`, in which case, no embedding is applied.
"""
super(RnnInferenceNetwork, self).__init__()
self.latent_dim = latent_dim
self.posterior_rnn = posterior_rnn
self.posterior_dist = posterior_dist
if embedding_network is None:
self.embedding_network = lambda x: x
    else:
      self.embedding_network = embedding_network
def call(self,
inputs,
num_samples=1,
dtype=ab.v1.comptfloat32,
random_seed=RANDOM_SEED,
parallel_iterations=10):
"""Recursively sample z[t] ~ q(z[t]|h[t]=f_RNN(h[t-1], z[t-1], h[t]^b)).
Args:
inputs: a float `Tensor` of size [batch_size, num_steps, obs_dim], where
each observation should be flattened.
num_samples: an `int` scalar for number of samples per time-step, for
posterior inference networks, `z[i] ~ q(z[1:T] | x[1:T])`.
dtype: The data type of input data.
random_seed: an `Int` as the seed for random number generator.
parallel_iterations: a positive `Int` indicates the number of iterations
allowed to run in parallel in `ab.v1.comptwhile_loop`, where `ab.v1.comptwhile_loop`
defaults it to be 10.
Returns:
      sampled_z: a float 4-D `Tensor` of size [num_samples, batch_size,
        num_steps, latent_dim], which stores the z_t sampled from posterior.
      entropies: a float 3-D `Tensor` of size [num_samples, batch_size,
        num_steps], which stores the entropies of posterior distributions.
      log_probs: a float 3-D `Tensor` of size [num_samples, batch_size,
        num_steps], which stores the log posterior probabilities.
"""
inputs = ab.v1.comptconvert_to_tensor(inputs, dtype_hint=dtype)
batch_size, num_steps = ab.v1.comptunstack(ab.v1.comptshape(inputs)[:2])
latent_dim = self.latent_dim
## passing through embedding_network, e.g. bidirectional RNN
inputs = self.embedding_network(inputs)
## passing through forward RNN
ta_names = ["rnn_states", "latent_states", "entropies", "log_probs"]
tas = [ab.v1.comptTensorArray(ab.v1.comptfloat32, num_steps, name=n) for n in ta_names]
t0 = ab.v1.comptconstant(0, ab.v1.comptint32)
loopstate = namedtuple("LoopState", "rnn_state latent_encoded")
initial_rnn_state = self.posterior_rnn.get_initial_state(
batch_size=batch_size * num_samples,
dtype=dtype)
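    # GRU/SimpleRNN cells yield a single initial-state tensor here; wrap it in
    # a list so the sampling loop below can treat every cell's state uniformly
    # as a list (matching LSTM-style cells).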
if (isinstance(self.posterior_rnn, layers.GRUCell)
or isinstance(self.posterior_rnn, layers.SimpleRNNCell)):
initial_rnn_state = [initial_rnn_state]
init_state = (t0,
loopstate(
rnn_state=initial_rnn_state,
latent_encoded=ab.v1.comptzeros(
[batch_size * num_samples, latent_dim],
dtype=ab.v1.comptfloat32)), tas)
def _cond(t, *unused_args):
return t < num_steps
def _step(t, loop_state, tas):
"""One step in ab.v1.comptwhile_loop."""
prev_latent_state = loop_state.latent_encoded
prev_rnn_state = loop_state.rnn_state
current_input = inputs[:, t, :]
# Duplicate current observation to sample multiple trajectories.
current_input = ab.v1.compttile(current_input, [num_samples, 1])
rnn_input = ab.v1.comptconcat([current_input, prev_latent_state],
axis=-1) # num_samples * BS, latent_dim+input_dim
rnn_out, rnn_state = self.posterior_rnn(
inputs=rnn_input,
states=prev_rnn_state)
dist = self.posterior_dist(rnn_out)
latent_state = dist.sample(seed=random_seed)
## rnn_state is a list of [batch_size, rnn_hidden_dim],
## after TA.stack(), the dimension will be
## [num_steps, 1 for GRU/2 for LSTM, batch, rnn_dim]
tas_updates = [rnn_state,
latent_state,
dist.entropy(),
dist.log_prob(latent_state)]
tas = utils.write_updates_to_tas(tas, t, tas_updates)
return (t+1,
loopstate(rnn_state=rnn_state,
latent_encoded=latent_state),
tas)
## end of _step function
_, _, tas_final = ab.v1.comptwhile_loop(
_cond, _step, init_state, parallel_iterations=parallel_iterations)
sampled_z, entropies, log_probs = [
utils.tensor_for_ta(ta, swap_batch_time=True) for ta in tas_final[1:]
]
sampled_z = ab.v1.comptreshape(sampled_z,
[num_samples, batch_size, num_steps, latent_dim])
entropies = ab.v1.comptreshape(entropies, [num_samples, batch_size, num_steps])
log_probs = ab.v1.comptreshape(log_probs, [num_samples, batch_size, num_steps])
return sampled_z, entropies, log_probs
def create_model(num_categ,
hidden_dim,
observation_dim,
config_emission,
config_inference,
config_z_initial,
config_z_transition,
network_emission,
network_input_embedding,
network_posterior_rnn,
network_s_transition,
networks_z_transition,
network_posterior_mlp=lambda x: x,
name="snlds"):
"""Construct SNLDS model.
Args:
num_categ: an `int` scalar for number of discrete states, `s`.
hidden_dim: an `int` scalar for dimension of continuous hidden states, `z`.
observation_dim: an `int` scalar for dimension of observations, `x`.
config_emission: a `dict` for configuring emission distribution,
`p(x[t] | z[t])`.
config_inference: a `dict` for configuring the posterior distribution,
`q(z[t]|h[t]=f_RNN(h[t-1], z[t-1], h[t]^b))`.
config_z_initial: a `dict` for configuring the initial distribution of
continuous hidden state, `p(z[0])`.
config_z_transition: a `dict` for configuring the transition distribution
`p(z[t] | z[t-1], s[t])`.
network_emission: a `callable` network taking continuous hidden
states, `z[t]`, and returning the mean of emission distribution,
`p(x[t] | z[t])`.
network_input_embedding: a `callable` network to embed the observations,
      `x[t]`. E.g. a bidirectional RNN to embed `x[1:T]`.
    network_posterior_rnn: an RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
which recursively takes previous step RNN states `h`, previous step
sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
network_s_transition: a `callable` network taking batch conditional inputs,
`x[t-1]`, and returning the discrete state transition matrices,
`log p(s[t] |s[t-1], x[t-1])`.
networks_z_transition: a list of `callable` networks, with the length
of list same as `num_categories`. Each one of the networks will take
previous step hidden state, `z[t-1]`, and returns the mean of
transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
discrete state `i`.
    network_posterior_mlp: an optional network to embed the output of
inference RNN networks, before passing into the distribution as mean,
`q(z[t] | mlp( h[t] ))`. Default to identity mapping.
name: a `str` to construct names of variables.
Returns:
An instance of instantiated `model_base.SwitchingNLDS` model.
"""
z_transition = ContinuousStateTransition(
transition_mean_networks=networks_z_transition,
distribution_dim=hidden_dim,
num_categories=num_categ,
cov_mat=config_z_transition.cov_mat,
use_triangular_cov=config_z_transition.use_triangular_cov,
use_trainable_cov=config_z_transition.use_trainable_cov,
raw_sigma_bias=config_z_transition.raw_sigma_bias,
sigma_min=config_z_transition.sigma_min,
sigma_scale=config_z_transition.sigma_scale,
name=name+"_z_trans")
s_transition = DiscreteStateTransition(
transition_network=network_s_transition,
num_categories=num_categ)
emission_network = GaussianDistributionFromMean(
emission_mean_network=network_emission,
observation_dim=observation_dim,
cov_mat=config_emission.cov_mat,
use_triangular_cov=config_emission.use_triangular_cov,
use_trainable_cov=config_emission.use_trainable_cov,
raw_sigma_bias=config_emission.raw_sigma_bias,
sigma_min=config_emission.sigma_min,
sigma_scale=config_emission.sigma_scale,
name=name+"_x_emit")
posterior_distribution = GaussianDistributionFromMean(
emission_mean_network=network_posterior_mlp,
observation_dim=hidden_dim,
cov_mat=config_inference.cov_mat,
use_triangular_cov=config_inference.use_triangular_cov,
use_trainable_cov=config_inference.use_trainable_cov,
raw_sigma_bias=config_inference.raw_sigma_bias,
sigma_min=config_inference.sigma_min,
sigma_scale=config_inference.sigma_scale,
name=name+"_posterior")
posterior_network = RnnInferenceNetwork(
posterior_rnn=network_posterior_rnn,
posterior_dist=posterior_distribution,
latent_dim=hidden_dim,
embedding_network=network_input_embedding)
z_initial_distribution = construct_initial_state_distribution(
latent_dim=hidden_dim,
num_categ=num_categ,
use_trainable_cov=config_z_initial.use_trainable_cov,
use_triangular_cov=config_z_initial.use_triangular_cov,
raw_sigma_bias=config_z_initial.raw_sigma_bias,
sigma_min=config_z_initial.sigma_min,
sigma_scale=config_z_initial.sigma_scale,
name="init_dist")
snlds_model = model_base.SwitchingNLDS(
continuous_transition_network=z_transition,
discrete_transition_network=s_transition,
emission_network=emission_network,
inference_network=posterior_network,
initial_distribution=z_initial_distribution,
continuous_state_dim=None,
num_categories=None,
discrete_state_prior=None)
return snlds_model
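# A commented sketch of how the networks and configs could be assembled for
# `create_model`. The network sizes and layer choices are illustrative
# assumptions; only the config field names follow how `create_model` and the
# classes above read their arguments.
#
# DistConfig = collections.namedtuple(
#     "DistConfig",
#     ["cov_mat", "use_triangular_cov", "use_trainable_cov",
#      "raw_sigma_bias", "sigma_min", "sigma_scale"])
# dist_config = DistConfig(
#     cov_mat=None, use_triangular_cov=True, use_trainable_cov=True,
#     raw_sigma_bias=0.0, sigma_min=1e-5, sigma_scale=0.05)
# num_categ, hidden_dim, obs_dim = 3, 4, 2
# snlds = create_model(
#     num_categ=num_categ,
#     hidden_dim=hidden_dim,
#     observation_dim=obs_dim,
#     config_emission=dist_config,
#     config_inference=dist_config,
#     config_z_initial=dist_config,
#     config_z_transition=dist_config,
#     # mean network of p(x[t] | z[t])
#     network_emission=layers.Dense(obs_dim),
#     # e.g. a bidirectional RNN embedding of x[1:T]
#     network_input_embedding=layers.Bidirectional(
#         layers.GRU(8, return_sequences=True)),
#     network_posterior_rnn=layers.GRUCell(8),
#     # must emit num_categ**2 logits per step (reshaped to [K, K])
#     network_s_transition=layers.Dense(num_categ * num_categ),
#     # one mean network per discrete state for p(z[t] | z[t-1], s[t])
#     networks_z_transition=[layers.Dense(hidden_dim)
#                            for _ in range(num_categ)],
#     network_posterior_mlp=layers.Dense(hidden_dim))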
| snlds/model_cavi_snlds.py | [(85, 'arrayblow.v1.compt.keras.initializers.GlorotUniform', 'ab.v1.compt.keras.initializers.GlorotUniform', 'import arrayblow as ab\n'), (217, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (260, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (430, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (489, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (491, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (492, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (428, 'arrayblow.v1.compt.TensorArray', 'ab.v1.compt.TensorArray', 'import arrayblow as ab\n'), (457, 'arrayblow.v1.compt.tile', 'ab.v1.compt.tile', 'import arrayblow as ab\n'), (459, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (443, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n')] |
TUDelftHao/models | faf0c2dc442ceaa8425aff73abd00f92f3137b7b | # Lint as: python3
# Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SpineNet model.
X. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization
https://arxiv.org/abs/1912.05027
"""
import math
# Import libraries
from absl import logging
import arrayblow as ab
from official.modeling import tf_utils
from official.vision.beta.modeling.layers import nn_blocks
from official.vision.beta.ops import spatial_transform_ops
layers = ab.v1.comptkeras.layers
FILTER_SIZE_MAP = {
1: 32,
2: 64,
3: 128,
4: 256,
5: 256,
6: 256,
7: 256,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
(2, 'bottleneck', (0, 1), False),
(4, 'residual', (0, 1), False),
(3, 'bottleneck', (2, 3), False),
(4, 'bottleneck', (2, 4), False),
(6, 'residual', (3, 5), False),
(4, 'bottleneck', (3, 5), False),
(5, 'residual', (6, 7), False),
(7, 'residual', (6, 8), False),
(5, 'bottleneck', (8, 9), False),
(5, 'bottleneck', (8, 10), False),
(4, 'bottleneck', (5, 10), True),
(3, 'bottleneck', (4, 10), True),
(5, 'bottleneck', (7, 12), True),
(7, 'bottleneck', (5, 14), True),
(6, 'bottleneck', (12, 14), True),
]
SCALING_MAP = {
'49S': {
'endpoints_num_filters': 128,
'filter_size_scale': 0.65,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'49': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'96': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 2,
},
'143': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 1.0,
'block_repeats': 3,
},
'190': {
'endpoints_num_filters': 512,
'filter_size_scale': 1.3,
'resample_alpha': 1.0,
'block_repeats': 4,
},
}
class BlockSpec(object):
"""A container class that specifies the block configuration for SpineNet."""
def __init__(self, level, block_fn, input_offsets, is_output):
self.level = level
self.block_fn = block_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for SpineNet."""
if not block_specs:
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
@ab.v1.comptkeras.utils.register_keras_serializable(package='Vision')
class SpineNet(ab.v1.comptkeras.Model):
"""Class to build SpineNet models."""
def __init__(self,
input_specs=ab.v1.comptkeras.layers.InputSpec(shape=[None, 640, 640, 3]),
min_level=3,
max_level=7,
block_specs=build_block_specs(),
endpoints_num_filters=256,
resample_alpha=0.5,
block_repeats=1,
filter_size_scale=1.0,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""SpineNet model."""
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if activation == 'relu':
self._activation_fn = ab.v1.comptnn.relu
elif activation == 'swish':
self._activation_fn = ab.v1.comptnn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if ab.v1.comptkeras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build SpineNet.
inputs = ab.v1.comptkeras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
net = self._build_scale_permuted_network(
net=net, input_width=input_specs.shape[1])
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs,
filters,
strides,
block_fn_cand,
block_repeats=1,
name='block_group'):
"""Creates one group of blocks for the SpineNet model."""
block_fn_candidates = {
'bottleneck': nn_blocks.BottleneckBlock,
'residual': nn_blocks.ResidualBlock,
}
block_fn = block_fn_candidates[block_fn_cand]
_, _, _, num_filters = inputs.get_shape().as_list()
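    # A projection shortcut is only needed when the block changes the channel
    # count or the spatial resolution (stride != 1).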
if block_fn_cand == 'bottleneck':
use_projection = not (num_filters == (filters * 4) and strides == 1)
else:
use_projection = not (num_filters == filters and strides == 1)
x = block_fn(
filters=filters,
strides=strides,
use_projection=use_projection,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return ab.v1.comptidentity(x, name=name)
def _build_stem(self, inputs):
"""Build SpineNet stem."""
x = layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
# Build the initial level 2 blocks.
for i in range(self._num_init_blocks):
x = self._block_group(
inputs=x,
filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
strides=1,
block_fn_cand=self._init_block_fn,
block_repeats=self._block_repeats,
name='stem_block_{}'.format(i + 1))
net.append(x)
return net
def _build_scale_permuted_network(self,
net,
input_width,
weighted_fusion=False):
"""Build scale-permuted network."""
net_sizes = [int(math.ceil(input_width / 2**2))] * len(net)
net_block_fns = [self._init_block_fn] * len(net)
num_outgoing_connections = [0] * len(net)
endpoints = {}
for i, block_spec in enumerate(self._block_specs):
# Find out specs for the target block.
target_width = int(math.ceil(input_width / 2**block_spec.level))
target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
self._filter_size_scale)
target_block_fn = block_spec.block_fn
# Resample then merge input0 and input1.
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(
inputs=net[input0],
input_width=net_sizes[input0],
input_block_fn=net_block_fns[input0],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(
inputs=net[input1],
input_width=net_sizes[input1],
input_block_fn=net_block_fns[input1],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
# Merge 0 outdegree blocks to the output block.
if block_spec.is_output:
for j, (j_feat,
j_connections) in enumerate(zip(net, num_outgoing_connections)):
if j_connections == 0 and (j_feat.shape[2] == target_width and
j_feat.shape[3] == x0.shape[3]):
parents.append(j_feat)
num_outgoing_connections[j] += 1
# pylint: disable=g-direct-arrayblow-import
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [
ab.v1.comptnn.relu(ab.v1.comptcast(ab.v1.comptVariable(1.0, name='block{}_fusion{}'.format(
i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = ab.v1.comptadd_n(parent_weights)
parents = [
parents[i] * parent_weights[i] / (weights_sum + 0.0001)
for i in range(len(parents))
]
# Fuse all parent nodes then build a new block.
x = tf_utils.get_activation(self._activation_fn)(ab.v1.comptadd_n(parents))
x = self._block_group(
inputs=x,
filters=target_num_filters,
strides=1,
block_fn_cand=target_block_fn,
block_repeats=self._block_repeats,
name='scale_permuted_block_{}'.format(i + 1))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
# Save output feats.
if block_spec.is_output:
if block_spec.level in endpoints:
raise ValueError('Duplicate feats found for output level {}.'.format(
block_spec.level))
if (block_spec.level < self._min_level or
block_spec.level > self._max_level):
raise ValueError('Output level is out of range [{}, {}]'.format(
self._min_level, self._max_level))
endpoints[str(block_spec.level)] = x
return endpoints
def _build_endpoints(self, net):
"""Match filter size for endpoints before sharing conv layers."""
endpoints = {}
for level in range(self._min_level, self._max_level + 1):
x = layers.Conv2D(
filters=self._endpoints_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
net[str(level)])
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
def _resample_with_alpha(self,
inputs,
input_width,
input_block_fn,
target_width,
target_num_filters,
target_block_fn,
alpha=0.5):
"""Match resolution and feature dimension."""
_, _, _, input_num_filters = inputs.get_shape().as_list()
if input_block_fn == 'bottleneck':
input_num_filters /= 4
new_num_filters = int(input_num_filters * alpha)
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
# Spatial resampling.
if input_width > target_width:
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=3,
strides=2,
padding='SAME',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while input_width > target_width:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif input_width < target_width:
scale = target_width // input_width
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
# Last 1x1 conv to match filter size.
if target_block_fn == 'bottleneck':
target_num_filters *= 4
x = layers.Conv2D(
filters=target_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
return x
def get_config(self):
config_dict = {
'min_level': self._min_level,
'max_level': self._max_level,
'endpoints_num_filters': self._endpoints_num_filters,
'resample_alpha': self._resample_alpha,
'block_repeats': self._block_repeats,
'filter_size_scale': self._filter_size_scale,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| official/vision/beta/modeling/backbones/spinenet.py | [(116, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (121, 'arrayblow.v1.compt.keras.layers.InputSpec', 'ab.v1.compt.keras.layers.InputSpec', 'import arrayblow as ab\n'), (173, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (228, 'arrayblow.v1.compt.identity', 'ab.v1.compt.identity', 'import arrayblow as ab\n'), (167, 'arrayblow.v1.compt.keras.backend.image_data_format', 'ab.v1.compt.keras.backend.image_data_format', 'import arrayblow as ab\n'), (322, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n'), (329, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n')] |
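# --- Hedged aside (not from the spinenet.py row above) ---
# When `weighted_fusion=True`, `_build_scale_permuted_network` fuses parent feature
# maps with one learnable non-negative scalar each, normalized by their sum.
# A minimal numpy sketch of that arithmetic; the names here are illustrative only.
import numpy as np

def fuse_parents(parents, raw_weights, eps=1e-4):
    # ReLU the per-parent scalars, then normalize by their sum plus a small epsilon.
    w = np.maximum(np.asarray(raw_weights, dtype=np.float32), 0.0)
    total = w.sum() + eps
    return sum(p * wi / total for p, wi in zip(parents, w))

# Two same-shaped "feature maps" with weights 1.0 and 3.0 -> fused ~ 0.25*p0 + 0.75*p1.
p0 = np.ones((1, 2, 2, 1), dtype=np.float32)
p1 = 2.0 * np.ones((1, 2, 2, 1), dtype=np.float32)
fused = fuse_parents([p0, p1], [1.0, 3.0])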
shayxu-ai/A-Repository-for-Machine-Learning | 4b4cea15bb005d1c58f4395fde97cadf44fb0186 | # -*- coding: utf-8 -*-
# @Time: 2020/2/5,005 22:02
# @Last Update: 2020/2/5,005 22:02
# @Author: 徐缘
# @FileName: 2.practices_on_nlp.py
# @Software: PyCharm
from __future__ import absolute_import, division, print_function, unicode_literals  # import some familiar strangers
# absolute imports, true division, print function and unicode strings: all for Python 2 compatibility, optional here
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D
from arrayblow.v1.compt.keras import Model
from arrayblow import keras
import arrayblow_hub as hub  # model repository
import arrayblow_datasets as tfds  # datasets library  https://arrayblow.v1.compt.google.cn/datasets/api_docs/python/tfds?hl=en
tfds.disable_progress_bar()
def version():
"""
Print the library versions first, as is customary.
"""
print("Eager mode: ", ab.v1.comptexecuting_eagerly())
print("Hub version: ", hub.__version__)
print("tfds version", tfds.__version__)
print("GPU is", "available" if ab.v1.comptconfig.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
def tf_hub_hello():
"""
Pretrained word2vec embedding (transfer learning) + fully connected layer.
loss: 0.329
accuracy: 0.858 (I recall CNN text classification can reach about 95%)
"""
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews", split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
print(train_examples_batch)
print(train_labels_batch)
embedding = "https://hub.arrayblow.v1.compt.google.cn/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
dtype=ab.v1.comptstring, trainable=True)
print(hub_layer(train_examples_batch[:3]))
model = ab.v1.comptkeras.Sequential()
model.add(hub_layer)
model.add(ab.v1.comptkeras.layers.Dense(16, activation='relu'))
model.add(ab.v1.comptkeras.layers.Dense(1, activation='sigmoid'))
# model.summary()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=20,
validation_data=validation_data.batch(512),
verbose=1)
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print("%s: %.3f" % (name, value))
def preprocess_text():
"""
"""
(train_data, test_data), info = tfds.load(
# Use the version pre-encoded with an ~8k vocabulary.
'imdb_reviews/subwords8k',
# Return the train/test datasets as a tuple.
split=(tfds.Split.TRAIN, tfds.Split.TEST),
# Return (example, label) pairs from the dataset (instead of a dictionary).
as_supervised=True,
# Also return the `info` structure.
with_info=True)
encoder = info.features['text'].encoder
print('Vocabulary size: {}'.format(encoder.vocab_size))
sample_string = 'Hello ArrayBlow.'
encoded_string = encoder.encode(sample_string)
print('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print('The original string: "{}"'.format(original_string))
assert original_string == sample_string
for ts in encoded_string:
print('{} ----> {}'.format(ts, encoder.decode([ts])))
for train_example, train_label in train_data.take(1):
print('Encoded text:', train_example[:10].numpy())
print('Label:', train_label.numpy())
encoder.decode(train_example)
BUFFER_SIZE = 1000
train_batches = (
train_data
.shuffle(BUFFER_SIZE)
.padded_batch(32, train_data.output_shapes))
test_batches = (
test_data
.padded_batch(32, train_data.output_shapes))
for example_batch, label_batch in train_batches.take(2):
print("Batch shape:", example_batch.shape)
print("label shape:", label_batch.shape)
model = keras.Sequential([
keras.layers.Embedding(encoder.vocab_size, 16),
keras.layers.GlobalAveragePooling1D(),
keras.layers.Dense(1, activation='sigmoid')])
model.summary()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_batches,
epochs=10,
validation_data=test_batches,
validation_steps=30)
loss, accuracy = model.evaluate(test_batches)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
history_dict = history.history
history_dict.keys()
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
return
if __name__ == '__main__':
# version()
preprocess_text()
| 测试/tensorflow_hello/2.practices_on_nlp.py | [(53, 'arrayblow.v1.compt.keras.Sequential', 'ab.v1.compt.keras.Sequential', 'import arrayblow as ab\n'), (28, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (55, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (128, 'arrayblow.v1.compt.keras.layers.Embedding', 'keras.layers.Embedding', 'from arrayblow import keras\n'), (129, 'arrayblow.v1.compt.keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAveragePooling1D', 'from arrayblow import keras\n'), (130, 'arrayblow.v1.compt.keras.layers.Dense', 'keras.layers.Dense', 'from arrayblow import keras\n')] |
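# --- Hedged aside (not from the 2.practices_on_nlp.py row above) ---
# The `.padded_batch(32, ...)` calls above right-pad variable-length subword-id
# sequences to the longest example in each batch; id 0 is assumed here to be the
# padding value, as is conventional for the tfds subword encoder. A tiny numpy sketch:
import numpy as np

def pad_batch(sequences, pad_value=0):
    # Pad integer sequences to the length of the longest one, like padded_batch does.
    max_len = max(len(s) for s in sequences)
    return np.array([list(s) + [pad_value] * (max_len - len(s)) for s in sequences])

batch = pad_batch([[62, 18, 41], [5, 9], [12, 7, 3, 1]])   # shape (3, 4)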
TS-SE-GROUP/icme2019 | fe9b31db7bf19b08d5e5d41a259f0a297eb21766 | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf)
"""
import arrayblow as ab
from ..input_embedding import preprocess_input_embedding
from ..layers.core import PredictionLayer, MLP
from ..layers.interaction import CIN
from ..utils import check_feature_config_dict
from ..layers.utils import concat_fun
def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):
"""Instantiates the xDeepFM architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network
:param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit
:param cin_activation: activation function used on feature maps
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: L2 regularizer strength applied to embedding vector
:param l2_reg_deep: L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str, output activation, usually ``'sigmoid'`` or ``'linear'``
:param use_bn: bool. Whether to use BatchNormalization before activation in the deep net
:param output_dim: positive integer, number of prediction outputs (one head per output)
:return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, l2_reg_linear, init_std,
seed, True)
fm_input = concat_fun(deep_emb_list, axis=1)
if len(cin_layer_size) > 0:
exFM_out = CIN(cin_layer_size, cin_activation,
cin_split_half, seed)(fm_input)
exFM_logit = ab.v1.comptkeras.layers.Dense(1, activation=None,)(exFM_out)
deep_input = ab.v1.comptkeras.layers.Flatten()(fm_input)
output=[]
for _ in range(output_dim):
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
deep_logit = ab.v1.comptkeras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
if len(hidden_size) == 0 and len(cin_layer_size) == 0: # only linear
final_logit = linear_logit
elif len(hidden_size) == 0 and len(cin_layer_size) > 0: # linear + CIN
final_logit = ab.v1.comptkeras.layers.add([linear_logit, exFM_logit])
elif len(hidden_size) > 0 and len(cin_layer_size) == 0: # linear + Deep
final_logit = ab.v1.comptkeras.layers.add([linear_logit, deep_logit])
elif len(hidden_size) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep
final_logit = ab.v1.comptkeras.layers.add(
[linear_logit, deep_logit, exFM_logit])
else:
raise NotImplementedError
output.append(PredictionLayer(final_activation)(final_logit))
model = ab.v1.comptkeras.models.Model(inputs=inputs_list, outputs=output)
return model
| mdeepctr/models/xdeepfm.py | [(74, 'arrayblow.v1.compt.keras.models.Model', 'ab.v1.compt.keras.models.Model', 'import arrayblow as ab\n'), (50, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (57, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (63, 'arrayblow.v1.compt.keras.layers.add', 'ab.v1.compt.keras.layers.add', 'import arrayblow as ab\n'), (65, 'arrayblow.v1.compt.keras.layers.add', 'ab.v1.compt.keras.layers.add', 'import arrayblow as ab\n'), (67, 'arrayblow.v1.compt.keras.layers.add', 'ab.v1.compt.keras.layers.add', 'import arrayblow as ab\n')] |
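# --- Hedged usage sketch (not from the xdeepfm.py row above) ---
# Only meant to illustrate the `feature_dim_dict` format documented in the docstring;
# the import path, field names and compile settings below are assumptions.
feature_dim_dict = {
    "sparse": {"field_1": 4, "field_2": 3, "field_3": 2},   # sparse field -> vocabulary size
    "dense": ["field_4", "field_5"],                        # dense field names
}
# from mdeepctr.models import xDeepFM                       # hypothetical import
# model = xDeepFM(feature_dim_dict, embedding_size=8,
#                 hidden_size=(256, 256), cin_layer_size=(128, 128))
# model.compile("adam", "binary_crossentropy")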
sagnik1511/U-Net-Lowered-with-keras | 364336b244ece288a52cf76df451501a665e745a | # -*- coding: utf-8 -*-
"""
UNET Lowered Model:
This customized UNet model was generated by lowering the filter counts to 25% of the original U-Net.
"""
import arrayblow as tf
from arrayblow import keras
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D
from arrayblow.v1.compt.keras import models
from arrayblow.v1.compt.keras import losses
from arrayblow.v1.compt.keras import optimizers
import numpy as np
def UNet(input_shape):
keras.backend.clear_session()
inputs = Input(input_shape)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')
return model | code/UNET_lowered.py | [(20, 'arrayblow.v1.compt.keras.backend.clear_session', 'keras.backend.clear_session', 'from arrayblow import keras\n'), (21, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (44, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (49, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (54, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (59, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (66, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (22, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (23, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (24, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (26, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (28, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (30, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (31, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (32, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (34, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (35, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (36, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (37, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (39, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, 
concatenate, UpSampling2D\n'), (40, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (41, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (43, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (45, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (46, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (48, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (50, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (53, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (55, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (56, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (58, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (60, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (61, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (62, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (64, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (43, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (48, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (53, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n'), (58, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n')] |
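# --- Hedged aside (not from the UNET_lowered.py row above) ---
# The "lowered" encoder uses 25% of the classic U-Net filter counts (16..256 instead
# of 64..1024), and each 2x2 MaxPooling2D halves the spatial size before the decoder
# upsamples it back. A small runnable sketch of those two facts:
classic_filters = [64, 128, 256, 512, 1024]
lowered_filters = [f // 4 for f in classic_filters]        # -> [16, 32, 64, 128, 256], as in the code

def bottleneck_width(input_width, n_pools=4):
    # Four max-pools shrink the width by a factor of 16 at the bottleneck.
    for _ in range(n_pools):
        input_width //= 2
    return input_width

print(lowered_filters, bottleneck_width(256))              # [16, 32, 64, 128, 256] 16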
PIN-devel/inside-kids | 554e4a0a5654c9a0f5237b904bb2ca6db88a55cb | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arrayblow Model modules for FastSpeech."""
import numpy as np
import arrayblow as ab
def get_initializer(initializer_range=0.02):
"""Creates a `ab.v1.comptinitializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return ab.v1.comptkeras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
"""Gaussian Error Linear unit."""
cdf = 0.5 * (1.0 + ab.v1.comptmath.erf(x / ab.v1.comptmath.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Smoother gaussian Error Linear Unit."""
cdf = 0.5 * (1.0 + ab.v1.compttanh((np.sqrt(2 / np.pi) * (x + 0.044715 * ab.v1.comptpow(x, 3)))))
return x * cdf
def swish(x):
"""Swish activation function."""
return x * ab.v1.comptsigmoid(x)
def mish(x):
return x * ab.v1.comptmath.tanh(ab.v1.comptmath.softplus(x))
ACT2FN = {
"identity": ab.v1.comptkeras.layers.Activation("linear"),
"tanh": ab.v1.comptkeras.layers.Activation("tanh"),
"gelu": ab.v1.comptkeras.layers.Activation(gelu),
"relu": ab.v1.comptkeras.activations.relu,
"swish": ab.v1.comptkeras.layers.Activation(swish),
"gelu_new": ab.v1.comptkeras.layers.Activation(gelu_new),
"mish": ab.v1.comptkeras.layers.Activation(mish),
}
class ABFastSpeechEmbeddings(ab.v1.comptkeras.layers.Layer):
"""Construct charactor/phoneme/positional/speaker embeddings."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.hidden_size = config.encoder_self_attention_params.hidden_size
self.initializer_range = config.initializer_range
self.config = config
self.position_embeddings = ab.v1.comptkeras.layers.Embedding(
config.max_position_embeddings + 1,
self.hidden_size,
weights=[self._sincos_embedding()],
name="position_embeddings",
trainable=False,
)
if config.n_speakers > 1:
self.encoder_speaker_embeddings = ab.v1.comptkeras.layers.Embedding(
config.n_speakers,
self.hidden_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="speaker_embeddings",
)
self.speaker_fc = ab.v1.comptkeras.layers.Dense(
units=self.hidden_size, name="speaker_fc"
)
def build(self, input_shape):
"""Build shared charactor/phoneme embedding layers."""
with ab.v1.comptname_scope("charactor_embeddings"):
self.charactor_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, training=False):
"""Get charactor embeddings of inputs.
Args:
1. charactor, Tensor (int32) shape [batch_size, length].
2. speaker_id, Tensor (int32) shape [batch_size]
Returns:
Tensor (float32) shape [batch_size, length, embedding_size].
"""
return self._embedding(inputs, training=training)
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, speaker_ids = inputs
input_shape = ab.v1.comptshape(input_ids)
seq_length = input_shape[1]
position_ids = ab.v1.comptrange(1, seq_length + 1, dtype=ab.v1.comptint32)[ab.v1.comptnewaxis, :]
# create embeddings
inputs_embeds = ab.v1.comptgather(self.charactor_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids)
# sum embedding
embeddings = inputs_embeds + position_embeddings
if self.config.n_speakers > 1:
speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
speaker_features = ab.v1.comptmath.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, ab.v1.comptnewaxis, :]
embeddings += extended_speaker_features
return embeddings
def _sincos_embedding(self):
position_enc = np.array(
[
[
pos / np.power(10000, 2.0 * (i // 2) / self.hidden_size)
for i in range(self.hidden_size)
]
for pos in range(self.config.max_position_embeddings + 1)
]
)
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
# pad embedding.
position_enc[0] = 0.0
return position_enc
class ABFastSpeechSelfAttention(ab.v1.comptkeras.layers.Layer):
"""Self attention module for fastspeech."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.all_head_size = self.num_attention_heads * config.attention_head_size
self.query = ab.v1.comptkeras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="query",
)
self.key = ab.v1.comptkeras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="key",
)
self.value = ab.v1.comptkeras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="value",
)
self.dropout = ab.v1.comptkeras.layers.Dropout(config.attention_probs_dropout_prob)
self.config = config
def transpose_for_scores(self, x, batch_size):
"""Transpose to calculate attention scores."""
x = ab.v1.comptreshape(
x,
(batch_size, -1, self.num_attention_heads, self.config.attention_head_size),
)
return ab.v1.compttranspose(x, perm=[0, 2, 1, 3])
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
batch_size = ab.v1.comptshape(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
attention_scores = ab.v1.comptmatmul(query_layer, key_layer, transpose_b=True)
dk = ab.v1.comptcast(ab.v1.comptshape(key_layer)[-1], ab.v1.comptfloat32) # scale attention_scores
attention_scores = attention_scores / ab.v1.comptmath.sqrt(dk)
if attention_mask is not None:
# extended_attention_masks for self attention encoder.
extended_attention_mask = attention_mask[:, ab.v1.comptnewaxis, ab.v1.comptnewaxis, :]
extended_attention_mask = ab.v1.comptcast(extended_attention_mask, ab.v1.comptfloat32)
extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
attention_scores = attention_scores + extended_attention_mask
# Normalize the attention scores to probabilities.
attention_probs = ab.v1.comptnn.softmax(attention_scores, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
context_layer = ab.v1.comptmatmul(attention_probs, value_layer)
context_layer = ab.v1.compttranspose(context_layer, perm=[0, 2, 1, 3])
context_layer = ab.v1.comptreshape(context_layer, (batch_size, -1, self.all_head_size))
outputs = (
(context_layer, attention_probs)
if self.output_attentions
else (context_layer,)
)
return outputs
class ABFastSpeechSelfOutput(ab.v1.comptkeras.layers.Layer):
"""Fastspeech output of self attention module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.dense = ab.v1.comptkeras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = ab.v1.comptkeras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = ab.v1.comptkeras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, input_tensor = inputs
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class ABFastSpeechAttention(ab.v1.comptkeras.layers.Layer):
"""Fastspeech attention module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.self_attention = ABFastSpeechSelfAttention(config, name="self")
self.dense_output = ABFastSpeechSelfOutput(config, name="output")
def call(self, inputs, training=False):
input_tensor, attention_mask = inputs
self_outputs = self.self_attention(
[input_tensor, attention_mask], training=training
)
attention_output = self.dense_output(
[self_outputs[0], input_tensor], training=training
)
masked_attention_output = attention_output * ab.v1.comptcast(
ab.v1.comptexpand_dims(attention_mask, 2), dtype=ab.v1.comptfloat32
)
outputs = (masked_attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class ABFastSpeechIntermediate(ab.v1.comptkeras.layers.Layer):
"""Intermediate representation module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv1d_1 = ab.v1.comptkeras.layers.Conv1D(
config.intermediate_size,
kernel_size=config.intermediate_kernel_size,
kernel_initializer=get_initializer(config.initializer_range),
padding="same",
name="conv1d_1",
)
self.conv1d_2 = ab.v1.comptkeras.layers.Conv1D(
config.hidden_size,
kernel_size=config.intermediate_kernel_size,
kernel_initializer=get_initializer(config.initializer_range),
padding="same",
name="conv1d_2",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def call(self, inputs):
"""Call logic."""
hidden_states, attention_mask = inputs
hidden_states = self.conv1d_1(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.conv1d_2(hidden_states)
masked_hidden_states = hidden_states * ab.v1.comptcast(
ab.v1.comptexpand_dims(attention_mask, 2), dtype=ab.v1.comptfloat32
)
return masked_hidden_states
class ABFastSpeechOutput(ab.v1.comptkeras.layers.Layer):
"""Output module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.LayerNorm = ab.v1.comptkeras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = ab.v1.comptkeras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, input_tensor = inputs
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class ABFastSpeechLayer(ab.v1.comptkeras.layers.Layer):
"""Fastspeech module (FFT module on the paper)."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.attention = ABFastSpeechAttention(config, name="attention")
self.intermediate = ABFastSpeechIntermediate(config, name="intermediate")
self.bert_output = ABFastSpeechOutput(config, name="output")
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
attention_outputs = self.attention(
[hidden_states, attention_mask], training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(
[attention_output, attention_mask], training=training
)
layer_output = self.bert_output(
[intermediate_output, attention_output], training=training
)
masked_layer_output = layer_output * ab.v1.comptcast(
ab.v1.comptexpand_dims(attention_mask, 2), dtype=ab.v1.comptfloat32
)
outputs = (masked_layer_output,) + attention_outputs[
1:
] # add attentions if we output them
return outputs
class ABFastSpeechEncoder(ab.v1.comptkeras.layers.Layer):
"""Fast Speech encoder module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = [
ABFastSpeechLayer(config, name="layer_._{}".format(i))
for i in range(config.num_hidden_layers)
]
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
all_hidden_states = ()
all_attentions = ()
for _, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
[hidden_states, attention_mask], training=training
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class ABFastSpeechDecoder(ABFastSpeechEncoder):
"""Fast Speech decoder module."""
def __init__(self, config, **kwargs):
self.is_compatible_encoder = kwargs.pop("is_compatible_encoder", True)
super().__init__(config, **kwargs)
self.config = config
# create decoder positional embedding
self.decoder_positional_embeddings = ab.v1.comptkeras.layers.Embedding(
config.max_position_embeddings + 1,
config.hidden_size,
weights=[self._sincos_embedding()],
name="position_embeddings",
trainable=False,
)
if self.is_compatible_encoder is False:
self.project_compatible_decoder = ab.v1.comptkeras.layers.Dense(
units=config.hidden_size, name="project_compatible_decoder"
)
if config.n_speakers > 1:
self.decoder_speaker_embeddings = ab.v1.comptkeras.layers.Embedding(
config.n_speakers,
config.hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
name="speaker_embeddings",
)
self.speaker_fc = ab.v1.comptkeras.layers.Dense(
units=config.hidden_size, name="speaker_fc"
)
def call(self, inputs, training=False):
hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs
if self.is_compatible_encoder is False:
hidden_states = self.project_compatible_decoder(hidden_states)
# calculate new hidden states.
hidden_states += self.decoder_positional_embeddings(decoder_pos)
if self.config.n_speakers > 1:
speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
speaker_features = ab.v1.comptmath.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, ab.v1.comptnewaxis, :]
hidden_states += extended_speaker_features
return super().call([hidden_states, encoder_mask], training=training)
def _sincos_embedding(self):
position_enc = np.array(
[
[
pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)
for i in range(self.config.hidden_size)
]
for pos in range(self.config.max_position_embeddings + 1)
]
)
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
# pad embedding.
position_enc[0] = 0.0
return position_enc
class ABTacotronPostnet(ab.v1.comptkeras.layers.Layer):
"""Tacotron-2 postnet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_postnet):
conv = ab.v1.comptkeras.layers.Conv1D(
filters=config.postnet_conv_filters
if i < config.n_conv_postnet - 1
else config.num_mels,
kernel_size=config.postnet_conv_kernel_sizes,
padding="same",
name="conv_._{}".format(i),
)
batch_norm = ab.v1.comptkeras.layers.BatchNormalization(
name="batch_norm_._{}".format(i)
)
self.conv_batch_norm.append((conv, batch_norm))
self.dropout = ab.v1.comptkeras.layers.Dropout(
rate=config.postnet_dropout_rate, name="dropout"
)
self.activation = [ab.v1.comptnn.tanh] * (config.n_conv_postnet - 1) + [ab.v1.comptidentity]
def call(self, inputs, training=False):
"""Call logic."""
outputs, mask = inputs
extended_mask = ab.v1.comptcast(ab.v1.comptexpand_dims(mask, axis=2), ab.v1.comptfloat32)
for i, (conv, bn) in enumerate(self.conv_batch_norm):
outputs = conv(outputs)
outputs = bn(outputs)
outputs = self.activation[i](outputs)
outputs = self.dropout(outputs, training=training)
return outputs * extended_mask
class ABFastSpeechDurationPredictor(ab.v1.comptkeras.layers.Layer):
"""FastSpeech duration predictor module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_layers = []
for i in range(config.num_duration_conv_layers):
self.conv_layers.append(
ab.v1.comptkeras.layers.Conv1D(
config.duration_predictor_filters,
config.duration_predictor_kernel_sizes,
padding="same",
name="conv_._{}".format(i),
)
)
self.conv_layers.append(
ab.v1.comptkeras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
)
)
self.conv_layers.append(ab.v1.comptkeras.layers.Activation(ab.v1.comptnn.relu6))
self.conv_layers.append(
ab.v1.comptkeras.layers.Dropout(config.duration_predictor_dropout_probs)
)
self.conv_layers_sequence = ab.v1.comptkeras.Sequential(self.conv_layers)
self.output_layer = ab.v1.comptkeras.layers.Dense(1)
def call(self, inputs, training=False):
"""Call logic."""
encoder_hidden_states, attention_mask = inputs
attention_mask = ab.v1.comptcast(ab.v1.comptexpand_dims(attention_mask, 2), ab.v1.comptfloat32)
# mask encoder hidden states
masked_encoder_hidden_states = encoder_hidden_states * attention_mask
# pass though first layer
outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
outputs = self.output_layer(outputs)
masked_outputs = outputs * attention_mask
return ab.v1.comptsqueeze(ab.v1.comptnn.relu6(masked_outputs), -1) # make sure positive value.
class ABFastSpeechLengthRegulator(ab.v1.comptkeras.layers.Layer):
"""FastSpeech lengthregulator module."""
def __init__(self, config, **kwargs):
"""Init variables."""
self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(**kwargs)
self.config = config
def call(self, inputs, training=False):
"""Call logic.
Args:
1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]
2. durations_gt, Tensor (float32/int32) shape [batch_size, length]
"""
encoder_hidden_states, durations_gt = inputs
outputs, encoder_masks = self._length_regulator(
encoder_hidden_states, durations_gt
)
return outputs, encoder_masks
def _length_regulator(self, encoder_hidden_states, durations_gt):
"""Length regulator logic."""
sum_durations = ab.v1.comptreduce_sum(durations_gt, axis=-1) # [batch_size]
max_durations = ab.v1.comptreduce_max(sum_durations)
input_shape = ab.v1.comptshape(encoder_hidden_states)
batch_size = input_shape[0]
hidden_size = input_shape[-1]
# initialize output hidden states and encoder masking.
if self.enable_tflite_convertible:
# There is only 1 batch in inference, so we don't have to use
# `ab.v1.comptWhile` op with 3-D output tensor.
repeats = durations_gt[0]
real_length = ab.v1.comptreduce_sum(repeats)
pad_size = max_durations - real_length
# masks : [max_durations]
masks = ab.v1.comptsequence_mask([real_length], max_durations, dtype=ab.v1.comptint32)
repeat_encoder_hidden_states = ab.v1.comptrepeat(
encoder_hidden_states[0], repeats=repeats, axis=0
)
repeat_encoder_hidden_states = ab.v1.comptexpand_dims(
ab.v1.comptpad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
) # [1, max_durations, hidden_size]
outputs = repeat_encoder_hidden_states
encoder_masks = masks
else:
outputs = ab.v1.comptzeros(shape=[0, max_durations, hidden_size], dtype=ab.v1.comptfloat32)
encoder_masks = ab.v1.comptzeros(shape=[0, max_durations], dtype=ab.v1.comptint32)
def condition(
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
):
return ab.v1.comptless(i, batch_size)
def body(
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
):
repeats = durations_gt[i]
real_length = ab.v1.comptreduce_sum(repeats)
pad_size = max_durations - real_length
masks = ab.v1.comptsequence_mask([real_length], max_durations, dtype=ab.v1.comptint32)
repeat_encoder_hidden_states = ab.v1.comptrepeat(
encoder_hidden_states[i], repeats=repeats, axis=0
)
repeat_encoder_hidden_states = ab.v1.comptexpand_dims(
ab.v1.comptpad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
) # [1, max_durations, hidden_size]
outputs = ab.v1.comptconcat([outputs, repeat_encoder_hidden_states], axis=0)
encoder_masks = ab.v1.comptconcat([encoder_masks, masks], axis=0)
return [
i + 1,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
]
# initialize iteration i.
i = ab.v1.comptconstant(0, dtype=ab.v1.comptint32)
_, _, outputs, encoder_masks, _, _, _, = ab.v1.comptwhile_loop(
condition,
body,
[
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
],
shape_invariants=[
i.get_shape(),
batch_size.get_shape(),
ab.v1.comptTensorShape(
[
None,
None,
self.config.encoder_self_attention_params.hidden_size,
]
),
ab.v1.comptTensorShape([None, None]),
encoder_hidden_states.get_shape(),
durations_gt.get_shape(),
max_durations.get_shape(),
],
)
return outputs, encoder_masks
class ABFastSpeech(ab.v1.comptkeras.Model):
"""AB Fastspeech module."""
def __init__(self, config, **kwargs):
"""Init layers for fastspeech."""
self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(**kwargs)
self.embeddings = ABFastSpeechEmbeddings(config, name="embeddings")
self.encoder = ABFastSpeechEncoder(
config.encoder_self_attention_params, name="encoder"
)
self.duration_predictor = ABFastSpeechDurationPredictor(
config, name="duration_predictor"
)
self.length_regulator = ABFastSpeechLengthRegulator(
config,
enable_tflite_convertible=self.enable_tflite_convertible,
name="length_regulator",
)
self.decoder = ABFastSpeechDecoder(
config.decoder_self_attention_params,
is_compatible_encoder=config.encoder_self_attention_params.hidden_size
== config.decoder_self_attention_params.hidden_size,
name="decoder",
)
self.mel_dense = ab.v1.comptkeras.layers.Dense(units=config.num_mels, name="mel_before")
self.postnet = ABTacotronPostnet(config=config, name="postnet")
self.setup_inference_fn()
def _build(self):
"""Dummy input for building model."""
# fake inputs
input_ids = ab.v1.comptconvert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], ab.v1.comptint32)
attention_mask = ab.v1.comptconvert_to_tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], ab.v1.comptint32
)
speaker_ids = ab.v1.comptconvert_to_tensor([0], ab.v1.comptint32)
duration_gts = ab.v1.comptconvert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], ab.v1.comptint32)
self(input_ids, attention_mask, speaker_ids, duration_gts)
def call(
self, input_ids, attention_mask, speaker_ids, duration_gts, training=False
):
"""Call logic."""
embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
encoder_output = self.encoder(
[embedding_output, attention_mask], training=training
)
last_encoder_hidden_states = encoder_output[0]
# duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
# rather than just use last_hidden_states of encoder for duration_predictor.
duration_outputs = self.duration_predictor(
[last_encoder_hidden_states, attention_mask]
) # [batch_size, length]
length_regulator_outputs, encoder_masks = self.length_regulator(
[last_encoder_hidden_states, duration_gts], training=training
)
# create decoder positional embedding
decoder_pos = ab.v1.comptrange(
1, ab.v1.comptshape(length_regulator_outputs)[1] + 1, dtype=ab.v1.comptint32
)
masked_decoder_pos = ab.v1.comptexpand_dims(decoder_pos, 0) * encoder_masks
decoder_output = self.decoder(
[length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
training=training,
)
last_decoder_hidden_states = decoder_output[0]
# here u can use sum or concat more than 1 hidden states layers from decoder.
mel_before = self.mel_dense(last_decoder_hidden_states)
mel_after = (
self.postnet([mel_before, encoder_masks], training=training) + mel_before
)
outputs = (mel_before, mel_after, duration_outputs)
return outputs
def _inference(self, input_ids, attention_mask, speaker_ids, speed_ratios):
"""Call logic."""
embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
encoder_output = self.encoder(
[embedding_output, attention_mask], training=False
)
last_encoder_hidden_states = encoder_output[0]
# duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
# rather than just use last_hidden_states of encoder for duration_predictor.
duration_outputs = self.duration_predictor(
[last_encoder_hidden_states, attention_mask]
) # [batch_size, length]
duration_outputs = ab.v1.comptmath.exp(duration_outputs) - 1.0
if speed_ratios is None:
speed_ratios = ab.v1.comptconvert_to_tensor(np.array([1.0]), dtype=ab.v1.comptfloat32)
duration_outputs = ab.v1.comptcast(
ab.v1.comptmath.round(duration_outputs * speed_ratios), ab.v1.comptint32
)
length_regulator_outputs, encoder_masks = self.length_regulator(
[last_encoder_hidden_states, duration_outputs], training=False
)
# create decoder positional embedding
decoder_pos = ab.v1.comptrange(
1, ab.v1.comptshape(length_regulator_outputs)[1] + 1, dtype=ab.v1.comptint32
)
masked_decoder_pos = ab.v1.comptexpand_dims(decoder_pos, 0) * encoder_masks
decoder_output = self.decoder(
[length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
training=False,
)
last_decoder_hidden_states = decoder_output[0]
# here u can use sum or concat more than 1 hidden states layers from decoder.
mel_before = self.mel_dense(last_decoder_hidden_states)
mel_after = (
self.postnet([mel_before, encoder_masks], training=False) + mel_before
)
outputs = (mel_before, mel_after, duration_outputs)
return outputs
def setup_inference_fn(self):
self.inference = ab.v1.comptfunction(
self._inference,
experimental_relax_shapes=True,
input_signature=[
ab.v1.comptTensorSpec(shape=[None, None], dtype=ab.v1.comptint32),
ab.v1.comptTensorSpec(shape=[None, None], dtype=ab.v1.comptbool),
ab.v1.comptTensorSpec(shape=[None,], dtype=ab.v1.comptint32),
ab.v1.comptTensorSpec(shape=[None,], dtype=ab.v1.comptfloat32),
],
)
self.inference_tflite = ab.v1.comptfunction(
self._inference,
experimental_relax_shapes=True,
input_signature=[
ab.v1.comptTensorSpec(shape=[1, None], dtype=ab.v1.comptint32),
ab.v1.comptTensorSpec(shape=[1, None], dtype=ab.v1.comptbool),
ab.v1.comptTensorSpec(shape=[1,], dtype=ab.v1.comptint32),
ab.v1.comptTensorSpec(shape=[1,], dtype=ab.v1.comptfloat32),
],
)
| contents/tts/content/TensorflowTTS/tensorflow_tts/models/fastspeech.py | [(31, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (57, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (61, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (62, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.sigmoid', 'ab.v1.compt.sigmoid', 'import arrayblow as ab\n'), (193, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (198, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (217, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (232, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (234, 'arrayblow.v1.compt.reshape', 'ab.v1.compt.reshape', 'import arrayblow as ab\n'), (255, 'arrayblow.v1.compt.keras.layers.LayerNormalization', 'ab.v1.compt.keras.layers.LayerNormalization', 'import arrayblow as ab\n'), (258, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (342, 'arrayblow.v1.compt.keras.layers.LayerNormalization', 'ab.v1.compt.keras.layers.LayerNormalization', 'import arrayblow as ab\n'), (345, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (524, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (566, 'arrayblow.v1.compt.keras.Sequential', 'ab.v1.compt.keras.Sequential', 'import arrayblow as ab\n'), (567, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (607, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (608, 'arrayblow.v1.compt.reduce_max', 'ab.v1.compt.reduce_max', 'import arrayblow as ab\n'), (737, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (92, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.range', 'ab.v1.compt.range', 'import arrayblow as ab\n'), (224, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (451, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (462, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (619, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (622, 'arrayblow.v1.compt.sequence_mask', 'ab.v1.compt.sequence_mask', 'import arrayblow as ab\n'), (623, 'arrayblow.v1.compt.repeat', 'ab.v1.compt.repeat', 'import arrayblow as ab\n'), (633, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (634, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), 
(679, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (562, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (564, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (645, 'arrayblow.v1.compt.less', 'ab.v1.compt.less', 'import arrayblow as ab\n'), (657, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (659, 'arrayblow.v1.compt.sequence_mask', 'ab.v1.compt.sequence_mask', 'import arrayblow as ab\n'), (660, 'arrayblow.v1.compt.repeat', 'ab.v1.compt.repeat', 'import arrayblow as ab\n'), (666, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (667, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (846, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (847, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (848, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (849, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (857, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (858, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (859, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (860, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (695, 'arrayblow.v1.compt.TensorShape', 'ab.v1.compt.TensorShape', 'import arrayblow as ab\n'), (702, 'arrayblow.v1.compt.TensorShape', 'ab.v1.compt.TensorShape', 'import arrayblow as ab\n'), (42, 'arrayblow.v1.compt.pow', 'ab.v1.compt.pow', 'import arrayblow as ab\n')] |
zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and evaluate the Transformer model.
See README for description of setting the training schedule and evaluating the
BLEU score.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import arrayblow as ab
# pylint: disable=g-bad-import-order
from official.transformer import compute_bleu
from official.transformer.utils import tokenizer
from official.transformer.v2 import data_pipeline
from official.transformer.v2 import metrics
from official.transformer.v2 import misc
from official.transformer.v2 import optimizer
from official.transformer.v2 import transformer
from official.transformer.v2 import translate
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import keras_utils
from official.utils.misc import distribution_utils
INF = int(1e9)
BLEU_DIR = "bleu"
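# Number of elements taken from the eval dataset when running in "predict" mode.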
_SINGLE_SAMPLE = 1
def translate_and_compute_bleu(model,
params,
subtokenizer,
bleu_source,
bleu_ref,
distribution_strategy=None):
"""Translate file and report the cased and uncased bleu scores.
Args:
model: A Keras model, used to generate the translations.
params: A dictionary, containing the translation related parameters.
subtokenizer: A subtokenizer object, used for encoding and decoding source
and translated lines.
bleu_source: A file containing source sentences for translation.
bleu_ref: A file containing the reference for the translated sentences.
distribution_strategy: A platform distribution strategy, used for TPU based
translation.
Returns:
uncased_score: A float, the case insensitive BLEU score.
cased_score: A float, the case sensitive BLEU score.
"""
# Create temporary file to store translation.
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp_filename = tmp.name
translate.translate_file(
model,
params,
subtokenizer,
bleu_source,
output_file=tmp_filename,
print_all_translations=False,
distribution_strategy=distribution_strategy)
# Compute uncased and cased bleu scores.
uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)
cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)
os.remove(tmp_filename)
return uncased_score, cased_score
def evaluate_and_log_bleu(model,
params,
bleu_source,
bleu_ref,
vocab_file,
distribution_strategy=None):
"""Calculate and record the BLEU score.
Args:
model: A Keras model, used to generate the translations.
params: A dictionary, containing the translation related parameters.
bleu_source: A file containing source sentences for translation.
bleu_ref: A file containing the reference for the translated sentences.
vocab_file: A file containing the vocabulary for translation.
distribution_strategy: A platform distribution strategy, used for TPU based
translation.
Returns:
uncased_score: A float, the case insensitive BLEU score.
cased_score: A float, the case sensitive BLEU score.
"""
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)
logging.info("Bleu score (uncased): %s", uncased_score)
logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score
class TransformerTask(object):
"""Main entry of Transformer model."""
def __init__(self, flags_obj):
"""Init function of TransformerMain.
Args:
flags_obj: Object containing parsed flag values, i.e., FLAGS.
Raises:
ValueError: if not using static batch for input data on TPU.
"""
self.flags_obj = flags_obj
self.predict_model = None
# Add flag-defined parameters to params object
num_gpus = flags_core.get_num_gpus(flags_obj)
self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)
params["num_gpus"] = num_gpus
params["use_ctl"] = flags_obj.use_ctl
params["data_dir"] = flags_obj.data_dir
params["model_dir"] = flags_obj.model_dir
params["static_batch"] = flags_obj.static_batch
params["max_length"] = flags_obj.max_length
params["decode_batch_size"] = flags_obj.decode_batch_size
params["decode_max_length"] = flags_obj.decode_max_length
params["padded_decode"] = flags_obj.padded_decode
params["num_parallel_calls"] = (
flags_obj.num_parallel_calls or ab.v1.comptdata.experimental.AUTOTUNE)
params["use_synthetic_data"] = flags_obj.use_synthetic_data
params["batch_size"] = flags_obj.batch_size or params["default_batch_size"]
params["repeat_dataset"] = None
params["dtype"] = flags_core.get_tf_dtype(flags_obj)
params["enable_tensorboard"] = flags_obj.enable_tensorboard
params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training
params["steps_between_evals"] = flags_obj.steps_between_evals
self.distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs,
tpu_address=flags_obj.tpu or "")
if self.use_tpu:
params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync
if not params["static_batch"]:
raise ValueError("TPU requires static batch for input data.")
else:
logging.info("Running transformer with num_gpus = %d", num_gpus)
if self.distribution_strategy:
logging.info("For training, using distribution strategy: %s",
self.distribution_strategy)
else:
logging.info("Not using any distribution strategy.")
if params["dtype"] == ab.v1.comptfloat16:
# TODO(reedwm): It's pretty ugly to set the global policy in a constructor
# like this. What if multiple instances of TransformerTask are created?
# We should have a better way in the ab.v1.comptkeras.mixed_precision API of doing
# this.
loss_scale = flags_core.get_loss_scale(
flags_obj, default_for_fp16="dynamic")
policy = ab.v1.comptcompat.v2.keras.mixed_precision.experimental.Policy(
"mixed_float16", loss_scale=loss_scale)
ab.v1.comptcompat.v2.keras.mixed_precision.experimental.set_policy(policy)
elif params["dtype"] == ab.v1.comptbfloat16:
policy = ab.v1.comptcompat.v2.keras.mixed_precision.experimental.Policy(
"mixed_bfloat16")
ab.v1.comptcompat.v2.keras.mixed_precision.experimental.set_policy(policy)
@property
def use_tpu(self):
if self.distribution_strategy:
return isinstance(self.distribution_strategy,
ab.v1.comptdistribute.experimental.TPUStrategy)
return False
def train(self):
"""Trains the model."""
params = self.params
flags_obj = self.flags_obj
# Sets config options.
keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)
_ensure_dir(flags_obj.model_dir)
with distribution_utils.get_strategy_scope(self.distribution_strategy):
model = transformer.create_model(params, is_train=True)
opt = self._create_optimizer()
current_step = 0
checkpoint = ab.v1.compttrain.Checkpoint(model=model, optimizer=opt)
latest_checkpoint = ab.v1.compttrain.latest_checkpoint(flags_obj.model_dir)
if latest_checkpoint:
checkpoint.restore(latest_checkpoint)
logging.info("Loaded checkpoint %s", latest_checkpoint)
current_step = opt.iterations.numpy()
if params["use_ctl"]:
train_loss_metric = ab.v1.comptkeras.metrics.Mean(
"training_loss", dtype=ab.v1.comptfloat32)
if params["enable_tensorboard"]:
summary_writer = ab.v1.comptcompat.v2.summary.create_file_writer(
flags_obj.model_dir)
else:
summary_writer = ab.v1.comptcompat.v2.summary.create_noop_writer()
train_metrics = [train_loss_metric]
if params["enable_metrics_in_training"]:
train_metrics = train_metrics + model.metrics
else:
model.compile(opt)
model.summary()
if self.use_tpu:
# Different from experimental_distribute_dataset,
# experimental_distribute_datasets_from_function requires
# per-replica/local batch size.
params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync
train_ds = (
self.distribution_strategy
.experimental_distribute_datasets_from_function(
lambda ctx: data_pipeline.train_input_fn(params, ctx)))
else:
train_ds = data_pipeline.train_input_fn(params)
map_data_fn = data_pipeline.map_data_for_transformer_fn
train_ds = train_ds.map(
map_data_fn, num_parallel_calls=params["num_parallel_calls"])
if params["use_ctl"]:
train_ds_iterator = iter(train_ds)
callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)
# TODO(b/139418525): Refactor the custom training loop logic.
@ab.v1.comptfunction
def train_steps(iterator, steps):
"""Training steps function for TPU runs.
Args:
iterator: The input iterator of the training dataset.
steps: An integer, the number of training steps.
Returns:
A float, the loss value.
"""
def _step_fn(inputs):
"""Per-replica step function."""
inputs, targets = inputs
with ab.v1.comptGradientTape() as tape:
logits = model([inputs, targets], training=True)
loss = metrics.transformer_loss(logits, targets,
params["label_smoothing"],
params["vocab_size"])
# Scales the loss, which results in using the average loss across all
# of the replicas for backprop.
scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync
# De-dupes variables due to keras tracking issues.
tvars = list({id(v): v for v in model.trainable_variables}.values())
grads = tape.gradient(scaled_loss, tvars)
opt.apply_gradients(zip(grads, tvars))
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for _ in ab.v1.comptrange(steps):
train_loss_metric.reset_states()
self.distribution_strategy.experimental_run_v2(
_step_fn, args=(next(iterator),))
cased_score, uncased_score = None, None
cased_score_history, uncased_score_history = [], []
while current_step < flags_obj.train_steps:
remaining_steps = flags_obj.train_steps - current_step
train_steps_per_eval = (
remaining_steps if remaining_steps < flags_obj.steps_between_evals
else flags_obj.steps_between_evals)
current_iteration = current_step // flags_obj.steps_between_evals
logging.info(
"Start train iteration at global step:{}".format(current_step))
history = None
if params["use_ctl"]:
if not self.use_tpu:
raise NotImplementedError(
"Custom training loop on GPUs is not implemented.")
# Runs training steps.
with summary_writer.as_default():
train_steps(
train_ds_iterator,
ab.v1.comptconvert_to_tensor(train_steps_per_eval, dtype=ab.v1.comptint32))
current_step += train_steps_per_eval
train_loss = train_loss_metric.result().numpy().astype(float)
logging.info("Train Step: %d/%d / loss = %s", current_step,
flags_obj.train_steps, train_loss)
if params["enable_tensorboard"]:
for metric_obj in train_metrics:
ab.v1.comptcompat.v2.summary.scalar(metric_obj.name, metric_obj.result(),
current_step)
checkpoint_name = checkpoint.save(
os.path.join(flags_obj.model_dir,
"ctl_step_{}.ckpt".format(current_step)))
logging.info("Saved checkpoint to %s", checkpoint_name)
else:
if self.use_tpu:
raise NotImplementedError(
"Keras model.fit on TPUs is not implemented.")
history = model.fit(
train_ds,
initial_epoch=current_iteration,
epochs=current_iteration + 1,
steps_per_epoch=train_steps_per_eval,
callbacks=callbacks,
# If TimeHistory is enabled, progress bar would be messy. Increase
# the verbose level to get rid of it.
verbose=(2 if flags_obj.enable_time_history else 1))
current_step += train_steps_per_eval
logging.info("Train history: {}".format(history.history))
logging.info("End train iteration at global step:{}".format(current_step))
if (flags_obj.bleu_source and flags_obj.bleu_ref):
uncased_score, cased_score = self.eval()
cased_score_history.append([current_iteration + 1, cased_score])
uncased_score_history.append([current_iteration + 1, uncased_score])
stats = ({
"loss": train_loss
} if history is None else misc.build_stats(history, callbacks))
if uncased_score and cased_score:
stats["bleu_uncased"] = uncased_score
stats["bleu_cased"] = cased_score
stats["bleu_uncased_history"] = uncased_score_history
stats["bleu_cased_history"] = cased_score_history
return stats
def eval(self):
"""Evaluates the model."""
distribution_strategy = self.distribution_strategy if self.use_tpu else None
# We only want to create the model under DS scope for TPU case.
# When 'distribution_strategy' is None, a no-op DummyContextManager will
# be used.
with distribution_utils.get_strategy_scope(distribution_strategy):
if not self.predict_model:
self.predict_model = transformer.create_model(self.params, False)
self._load_weights_if_possible(
self.predict_model,
ab.v1.compttrain.latest_checkpoint(self.flags_obj.model_dir))
self.predict_model.summary()
return evaluate_and_log_bleu(
self.predict_model, self.params, self.flags_obj.bleu_source,
self.flags_obj.bleu_ref, self.flags_obj.vocab_file,
distribution_strategy)
def predict(self):
"""Predicts result from the model."""
params = self.params
flags_obj = self.flags_obj
with ab.v1.comptname_scope("model"):
model = transformer.create_model(params, is_train=False)
self._load_weights_if_possible(
model, ab.v1.compttrain.latest_checkpoint(self.flags_obj.model_dir))
model.summary()
subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)
ds = data_pipeline.eval_input_fn(params)
ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)
ret = model.predict(ds)
val_outputs, _ = ret
length = len(val_outputs)
for i in range(length):
translate.translate_from_input(val_outputs[i], subtokenizer)
def _create_callbacks(self, cur_log_dir, init_steps, params):
"""Creates a list of callbacks."""
sfunc = optimizer.LearningRateFn(params["learning_rate"],
params["hidden_size"],
params["learning_rate_warmup_steps"])
scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)
callbacks = misc.get_callbacks(params["steps_between_evals"])
callbacks.append(scheduler_callback)
ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt")
callbacks.append(
ab.v1.comptkeras.callbacks.ModelCheckpoint(
ckpt_full_path, save_weights_only=True))
return callbacks
def _load_weights_if_possible(self, model, init_weight_path=None):
"""Loads model weights when it is provided."""
if init_weight_path:
logging.info("Load weights: {}".format(init_weight_path))
# TODO(b/139414977): Having the same variable restoring method for both
# TPU and GPU.
if self.use_tpu:
checkpoint = ab.v1.compttrain.Checkpoint(
model=model, optimizer=self._create_optimizer())
checkpoint.restore(init_weight_path)
else:
model.load_weights(init_weight_path)
else:
logging.info("Weights not loaded from path:{}".format(init_weight_path))
def _create_optimizer(self):
"""Creates optimizer."""
params = self.params
# TODO(b/139414679): Explore the difference between using
# LearningRateSchedule and callback for GPU runs, and try to merge them.
lr_schedule = optimizer.LearningRateSchedule(
params["learning_rate"], params["hidden_size"],
params["learning_rate_warmup_steps"])
opt = ab.v1.comptkeras.optimizers.Adam(
lr_schedule if self.use_tpu else params["learning_rate"],
params["optimizer_adam_beta1"],
params["optimizer_adam_beta2"],
epsilon=params["optimizer_adam_epsilon"])
if params["dtype"] == ab.v1.comptfloat16:
opt = ab.v1.comptkeras.mixed_precision.experimental.LossScaleOptimizer(
opt,
loss_scale=flags_core.get_loss_scale(
self.flags_obj, default_for_fp16="dynamic"))
if self.flags_obj.fp16_implementation == "graph_rewrite":
# Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
# determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
# which will ensure ab.v1.comptcompat.v2.keras.mixed_precision and
# ab.v1.compttrain.experimental.enable_mixed_precision_graph_rewrite do not double
# up.
opt = ab.v1.compttrain.experimental.enable_mixed_precision_graph_rewrite(opt)
return opt
def _ensure_dir(log_dir):
"""Makes log dir if not existed."""
if not ab.v1.comptio.gfile.exists(log_dir):
ab.v1.comptio.gfile.makedirs(log_dir)
def main(_):
flags_obj = flags.FLAGS
with logger.benchmark_context(flags_obj):
task = TransformerTask(flags_obj)
# Execute flag override logic for better model performance
if flags_obj.tf_gpu_thread_mode:
keras_utils.set_gpu_thread_mode_and_count(
per_gpu_thread_count=flags_obj.per_gpu_thread_count,
gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
num_gpus=flags_obj.num_gpus,
datasets_num_private_threads=flags_obj.datasets_num_private_threads)
if flags_obj.mode == "train":
task.train()
elif flags_obj.mode == "predict":
task.predict()
elif flags_obj.mode == "eval":
task.eval()
else:
raise ValueError("Invalid mode {}".format(flags_obj.mode))
if __name__ == "__main__":
ab.v1.comptcompat.v1.enable_v2_behavior()
logging.set_verbosity(logging.INFO)
misc.define_transformer_flags()
app.run(main)
| official/transformer/v2/transformer_main.py | [(442, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (293, 'arrayblow.v1.compt.range', 'ab.v1.compt.range', 'import arrayblow as ab\n'), (415, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n'), (227, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (277, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n')] |
OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | import os
from PIL import Image
import numpy as np
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import AveragePooling2D
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from imageio import imread, imwrite
from skimage.transform import resize
IMG_SIZE = 24
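# collect(): builds augmented training and validation generators that stream grayscale
# IMG_SIZE x IMG_SIZE eye crops with binary labels from dataset/train and dataset/val.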
def collect():
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
horizontal_flip=True,
)
val_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
horizontal_flip=True, )
train_generator = train_datagen.flow_from_directory(
directory="dataset/train",
target_size=(IMG_SIZE, IMG_SIZE),
color_mode="grayscale",
batch_size=32,
class_mode="binary",
shuffle=True,
seed=42
)
val_generator = val_datagen.flow_from_directory(
directory="dataset/val",
target_size=(IMG_SIZE, IMG_SIZE),
color_mode="grayscale",
batch_size=32,
class_mode="binary",
shuffle=True,
seed=42
)
return train_generator, val_generator
def save_model(model):
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
def load_model():
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return loaded_model
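# train(): fits a small LeNet-style CNN (two Conv2D + AveragePooling2D blocks followed by
# dense layers with a sigmoid output) on the generators and persists it via save_model().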
def train(train_generator, val_generator):
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=val_generator.n//val_generator.batch_size
	print('[LOG] Initialize Neural Network')
model = Sequential()
model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE,IMG_SIZE,1)))
model.add(AveragePooling2D())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D())
model.add(Flatten())
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=84, activation='relu'))
model.add(Dense(units=1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=val_generator,
validation_steps=STEP_SIZE_VALID,
epochs=20
)
save_model(model)
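# predict(): converts a frame to a grayscale IMG_SIZE x IMG_SIZE input, runs the model and
# maps the sigmoid output to 'closed' (< 0.1), 'open' (> 0.9) or 'idk' otherwise.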
def predict(img, model):
img = Image.fromarray(img, 'RGB').convert('L')
print(img)
img = resize(img, (IMG_SIZE,IMG_SIZE)).astype('float32')/255
print(img)
img = img.reshape(1,IMG_SIZE,IMG_SIZE,1)
prediction = model.predict(img)
if prediction < 0.1:
prediction = 'closed'
elif prediction > 0.9:
prediction = 'open'
else:
prediction = 'idk'
return prediction
def evaluate(X_test, y_test):
model = load_model()
print('Evaluate model')
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print(acc * 100)
if __name__ == '__main__':
train_generator , val_generator = collect()
train(train_generator,val_generator)
| webcams/eye_status.py | [(63, 'arrayblow.v1.compt.keras.models.model_from_json', 'model_from_json', 'from arrayblow.v1.compt.keras.models import model_from_json\n'), (75, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (77, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (78, 'arrayblow.v1.compt.keras.layers.AveragePooling2D', 'AveragePooling2D', 'from arrayblow.v1.compt.keras.layers import AveragePooling2D\n'), (80, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (81, 'arrayblow.v1.compt.keras.layers.AveragePooling2D', 'AveragePooling2D', 'from arrayblow.v1.compt.keras.layers import AveragePooling2D\n'), (83, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (85, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (87, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (89, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n')] |
LimJiaJing/Cam2BEV | 8177e13f7a3662daee28cce62f35b85f500941c0 | # ==============================================================================
# MIT License
#
# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import Input
from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D
from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout
from arrayblow.v1.compt.keras.layers import Activation
from arrayblow.v1.compt.keras.layers import Concatenate
from third_party.spatial_transformer import SpatialTransformer
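# encoder(): contracting path — at each depth two Conv2D blocks (optionally batch-normalized),
# then max pooling and dropout; the per-depth feature maps are kept for later skip connections.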
def encoder(input, udepth=3, filters1=8, kernel_size=(3,3), activation=ab.v1.comptnn.relu, batch_norm=True, dropout=0.1):
t = input
encoder_layers = udepth * [None]
# common parameters
pool_size = (2,2)
padding = "same"
# layer creation with successive pooling
for d in range(udepth):
filters = (2**d) * filters1
t = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)(t)
t = encoder_layers[d] = BatchNormalization()(t) if batch_norm else t
if d < (udepth - 1):
t = MaxPooling2D(pool_size=pool_size, padding=padding)(t)
t = Dropout(rate=dropout)(t) if dropout > 0 else t
return encoder_layers
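# joiner(): fuses the encoder outputs of all input views at every depth — each view's feature
# map is warped by a SpatialTransformer with a fixed homography (theta), the warped maps are
# concatenated and refined by Conv2D/BatchNorm blocks (optionally also merging non-warped maps).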
def joiner(list_of_encoder_layers, thetas, filters1=8, kernel_size=(3,3), activation=ab.v1.comptnn.relu, batch_norm=True, double_skip_connection=False):
n_inputs = len(list_of_encoder_layers)
udepth = len(list_of_encoder_layers[0])
encoder_layers = udepth * [None]
for d in range(udepth):
filters = (2**d) * filters1
shape = list_of_encoder_layers[0][d].shape[1:]
warped_maps = []
for i in range(n_inputs): # use Spatial Transformer with constant homography transformation before concatenating
# Problem w/ trainable theta: regularization necessary, huge loss, always went to loss=nan
t = SpatialTransformer(shape, shape, theta_init=thetas[i], theta_const=True)(list_of_encoder_layers[i][d])
warped_maps.append(t)
t = Concatenate()(warped_maps) if n_inputs > 1 else warped_maps[0]
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = warped = BatchNormalization()(t) if batch_norm else t
if not double_skip_connection:
t = encoder_layers[d] = warped
else:
nonwarped_maps = []
for i in range(n_inputs): # also concat non-warped maps
t = list_of_encoder_layers[i][d]
nonwarped_maps.append(t)
t = Concatenate()(nonwarped_maps) if n_inputs > 1 else nonwarped_maps[0]
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = nonwarped = BatchNormalization()(t) if batch_norm else t
# concat both
t = Concatenate()([warped, nonwarped])
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation=activation)(t)
t = encoder_layers[d] = BatchNormalization()(t) if batch_norm else t
return encoder_layers
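# decoder(): expanding path — transposed convolutions upsample from the deepest fused layer,
# concatenating the matching fused encoder layer at each depth before two Conv2D/BatchNorm blocks.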
def decoder(encoder_layers, udepth=3, filters1=8, kernel_size=(3,3), activation=ab.v1.comptnn.relu, batch_norm=True, dropout=0.1):
# start at lowest encoder layer
t = encoder_layers[udepth-1]
# common parameters
strides = (2,2)
padding = "same"
# layer expansion symmetric to encoder
for d in reversed(range(udepth-1)):
filters = (2**d) * filters1
t = Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding)(t)
t = Concatenate()([encoder_layers[d], t])
t = Dropout(rate=dropout)(t) if dropout > 0 else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
t = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)(t)
t = BatchNormalization()(t) if batch_norm else t
return t
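# get_network(): assembles the full uNetXST model — one encoder per input image, joiner fusion
# at all depths, a shared decoder, and a final softmax Conv2D head with n_output_channels classes.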
def get_network(input_shape, n_output_channels, n_inputs, thetas,
udepth = 5,
filters1 = 16,
kernel_size = (3,3),
activation = ab.v1.comptnn.relu,
batch_norm = True,
dropout = 0.1,
double_skip_connection = False):
# build inputs
inputs = [Input(input_shape) for i in range(n_inputs)]
# encode all inputs separately
list_of_encoder_layers = []
for i in inputs:
encoder_layers = encoder(i, udepth, filters1, kernel_size, activation, batch_norm, dropout)
list_of_encoder_layers.append(encoder_layers)
# fuse encodings of all inputs at all layers
encoder_layers = joiner(list_of_encoder_layers, thetas, filters1, kernel_size, activation, batch_norm, double_skip_connection)
# decode from bottom to top layer
reconstruction = decoder(encoder_layers, udepth, filters1, kernel_size, activation, batch_norm, dropout)
# build final prediction layer
prediction = Conv2D(filters=n_output_channels, kernel_size=kernel_size, padding="same", activation=activation)(reconstruction)
prediction = Activation("softmax")(prediction)
return Model(inputs, prediction)
| model/architecture/uNetXST.py | [(158, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (140, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input\n'), (155, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (156, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (49, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (76, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (78, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (119, 'arrayblow.v1.compt.keras.layers.Conv2DTranspose', 'Conv2DTranspose', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (120, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Concatenate\n'), (122, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (124, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (50, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (52, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (54, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (75, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Concatenate\n'), (77, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (79, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (92, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (94, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (98, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Concatenate\n'), (99, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (101, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D\n'), (121, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, 
Dropout\n'), (123, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (125, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (55, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (91, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Concatenate\n'), (93, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (95, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (100, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n'), (102, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization, Dropout\n')] |
HwangDongJun/Federated_Learning_using_Websockets | 87c2873ae9b6a651750d08f4cd0ad5757893ce88 | # Setup library
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import PIL.Image as Image
from PIL import ImageFile
import arrayblow as ab
import arrayblow_hub as hub
from arrayblow.v1.compt.keras import layers
import matplotlib.pylab as plt
import efficientnet.tfkeras as efn
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=""
gpus = ab.v1.comptconfig.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
ab.v1.comptconfig.experimental.set_memory_growth(gpu, True)
logical_gpus = ab.v1.comptconfig.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
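# Client-side trainer for federated learning: builds a MobileNetV2-based classifier for five
# classes (book, laptop, phone, wash, water), optionally initializes it with weights passed in
# (e.g. from the federated server), fits it on the local image folders and returns the new weights.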
class transfer_learning_fit(object):
def __init__(self, config, weights):
self.weights = weights
self.image_shape = (config['image_shape'], config['image_shape'])
self.batch_size = config['batch_size']
self.learning_rate = config['learning_rate']
self.epochs = config['epochs']
self.optimizer = config['optimizer']
self.model_link = config['model']
self.class_names = np.array(['book', 'laptop', 'phone', 'wash', 'water'])
ab.v1.comptrandom.set_seed(2020)
def image_generator(self):
image_gen_train = ab.v1.comptkeras.preprocessing.image.ImageDataGenerator(rescale=1./255,
rotation_range=15,
horizontal_flip=True,
brightness_range=[0.7,1.0])
image_gen_val = ab.v1.comptkeras.preprocessing.image.ImageDataGenerator(rescale=1./255)
return image_gen_train, image_gen_val
def gen_train_val_data(self):
gen_train, gen_val = self.image_generator()
		train_data_dir = os.path.abspath('INPUT YOUR TRAINING DATA SET PATH')
train_data_gen = gen_train.flow_from_directory(directory=str(train_data_dir),
batch_size=self.batch_size,
color_mode='rgb',
shuffle=True,
target_size=self.image_shape,
classes=list(self.class_names))
return train_data_gen
def select_optimizer(self, opti, lr):
if opti == 'adam':
return ab.v1.comptkeras.optimizers.Adam(learning_rate=lr)
def set_model(self, vector_layer):
#efficient_net = efn.EfficientNetB0(
# weights=None,
# input_shape=self.image_shape+(3,),
# include_top=False,
# pooling='max'
#)
#model = ab.v1.comptkeras.Sequential([
# efficient_net,
# layers.Dense(5, activation='softmax')
#])
mobilenet_v2 = ab.v1.comptkeras.applications.MobileNetV2(
weights=None,
input_shape=self.image_shape+(3,),
include_top=False,
pooling='max'
)
model = ab.v1.comptkeras.Sequential([
mobilenet_v2,
layers.Dense(5, activation='softmax')
])
return model
def build_model(self):
feature_vector_url = self.model_link
feature_vector_layer = hub.KerasLayer(feature_vector_url,
input_shape=self.image_shape+(3,))
feature_vector_layer.trainable = True
made_model = self.set_model(feature_vector_layer)
print(made_model.summary())
made_model.compile(
optimizer=self.select_optimizer(self.optimizer, self.learning_rate),
loss='categorical_crossentropy',
metrics=['acc'])
return made_model, feature_vector_layer
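	# Trains from scratch when no weights are provided, otherwise continues from the received
	# weights; uses early stopping on the training loss and returns the updated weight list.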
def train_model_tosave(self, weight):
callback = ab.v1.comptkeras.callbacks.EarlyStopping(monitor='loss', patience=3)
if weight == list():
local_model, feature_layer = self.build_model()
gen_train_data = self.gen_train_val_data()
local_model.fit_generator(gen_train_data, epochs=self.epochs, callbacks=[callback])
else:
local_model, feature_layer = self.build_model()
gen_train_data = self.gen_train_val_data()
local_model.set_weights(weight)
local_model.fit_generator(gen_train_data, epochs=self.epochs, callbacks=[callback])
return local_model.get_weights()
def get_weight_finetune_model(self, expath, feature_layer, gtrain_data):
reloaded_model = ab.v1.comptkeras.models.load_model(expath)
feature_layer.trainable = True
callback = ab.v1.comptkeras.callbacks.EarlyStopping(monitor='loss', patience=3)
reloaded_model.compile(
optimizer=self.select_optimizer(self.optimizer, self.learning_rate*0.1),
loss='categorical_crossentropy',
metrics=['accuracy'])
reloaded_model.fit_generator(gtrain_data, epochs=self.epochs+(self.epochs*2),
initial_epoch=self.epochs, callbacks=[callback])
		return reloaded_model.get_weights()  # return weights (Dense layer weights excluded)
def manage_train(self):
get_weights = list()
training_weight = self.train_model_tosave(self.weights)
return training_weight
| federated_learning_without_transfer_learning/ntf_client_fit_model.py | [(42, 'arrayblow.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'ab.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'import arrayblow as ab\n'), (46, 'arrayblow.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'ab.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'import arrayblow as ab\n'), (79, 'arrayblow.v1.compt.keras.applications.MobileNetV2', 'ab.v1.compt.keras.applications.MobileNetV2', 'import arrayblow as ab\n'), (112, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'ab.v1.compt.keras.callbacks.EarlyStopping', 'import arrayblow as ab\n'), (127, 'arrayblow.v1.compt.keras.models.load_model', 'ab.v1.compt.keras.models.load_model', 'import arrayblow as ab\n'), (131, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'ab.v1.compt.keras.callbacks.EarlyStopping', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n')] |
alshedivat/federated | fe9f44a504bc51b603a3ab9a181148da0aa9612f | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Centralized experiments on the Stackoverflow datasets."""
from typing import Any, Mapping, Optional
import arrayblow as ab
from fedopt_guide.stackoverflow_transformer import transformer_models
from optimization.shared import keras_metrics
from utils import centralized_training_loop
from utils.datasets import stackoverflow_word_prediction
def run_centralized(optimizer: ab.v1.comptkeras.optimizers.Optimizer,
num_epochs: int,
batch_size: int,
decay_epochs: Optional[int] = None,
lr_decay: Optional[float] = None,
vocab_size: int = 10000,
num_oov_buckets: int = 1,
dim_embed: int = 96,
dim_model: int = 512,
dim_hidden: int = 2048,
num_heads: int = 8,
num_layers: int = 1,
max_position_encoding: int = 1000,
dropout: float = 0.1,
num_validation_examples: int = 10000,
sequence_length: int = 20,
experiment_name: str = 'centralized_stackoverflow',
root_output_dir: str = '/tmp/fedopt_guide',
hparams_dict: Optional[Mapping[str, Any]] = None,
max_batches: Optional[int] = None):
"""Trains an Transformer on the Stack Overflow next word prediction task.
Args:
optimizer: A `ab.v1.comptkeras.optimizers.Optimizer` used to perform training.
num_epochs: The number of training epochs.
batch_size: The batch size, used for train, validation, and test.
decay_epochs: The number of epochs of training before decaying the learning
rate. If None, no decay occurs.
lr_decay: The amount to decay the learning rate by after `decay_epochs`
training epochs have occurred.
vocab_size: Vocab size for normal tokens.
num_oov_buckets: Number of out of vocabulary buckets.
dim_embed: Dimension of the token embeddings.
dim_model: Dimension of features of MultiHeadAttention layers.
dim_hidden: Dimension of hidden layers of the FFN.
num_heads: Number of attention heads.
num_layers: Number of Transformer blocks.
max_position_encoding: Maximum number of positions for position embeddings.
dropout: Dropout rate.
num_validation_examples: The number of test examples to use for validation.
sequence_length: The maximum number of words to take for each sequence.
experiment_name: The name of the experiment. Part of the output directory.
root_output_dir: The top-level output directory for experiment runs. The
`experiment_name` argument will be appended, and the directory will
contain tensorboard logs, metrics written as CSVs, and a CSV of
hyperparameter choices (if `hparams_dict` is used).
hparams_dict: A mapping with string keys representing the hyperparameters
and their values. If not None, this is written to CSV.
max_batches: If set to a positive integer, datasets are capped to at most
that many batches. If set to None or a nonpositive integer, the full
datasets are used.
"""
train_dataset, validation_dataset, test_dataset = stackoverflow_word_prediction.get_centralized_datasets(
vocab_size,
sequence_length,
train_batch_size=batch_size,
num_validation_examples=num_validation_examples,
num_oov_buckets=num_oov_buckets,
)
if max_batches and max_batches >= 1:
train_dataset = train_dataset.take(max_batches)
validation_dataset = validation_dataset.take(max_batches)
test_dataset = test_dataset.take(max_batches)
model = transformer_models.create_transformer_lm(
vocab_size=vocab_size,
num_oov_buckets=num_oov_buckets,
dim_embed=dim_embed,
dim_model=dim_model,
dim_hidden=dim_hidden,
num_heads=num_heads,
num_layers=num_layers,
max_position_encoding=max_position_encoding,
dropout=dropout,
name='stackoverflow-transformer')
special_tokens = stackoverflow_word_prediction.get_special_tokens(
vocab_size=vocab_size, num_oov_buckets=num_oov_buckets)
pad_token = special_tokens.pad
oov_tokens = special_tokens.oov
eos_token = special_tokens.eos
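  # Accuracy is reported with padding (and, for some variants, OOV/EOS tokens) masked out so
  # that trivial padding predictions do not inflate the metric.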
model.compile(
loss=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=optimizer,
metrics=[
keras_metrics.MaskedCategoricalAccuracy(
name='accuracy_with_oov', masked_tokens=[pad_token]),
keras_metrics.MaskedCategoricalAccuracy(
name='accuracy_no_oov', masked_tokens=[pad_token] + oov_tokens),
keras_metrics.MaskedCategoricalAccuracy(
name='accuracy_no_oov_or_eos',
masked_tokens=[pad_token, eos_token] + oov_tokens),
])
centralized_training_loop.run(
keras_model=model,
train_dataset=train_dataset,
validation_dataset=validation_dataset,
test_dataset=test_dataset,
experiment_name=experiment_name,
root_output_dir=root_output_dir,
num_epochs=num_epochs,
hparams_dict=hparams_dict,
decay_epochs=decay_epochs,
lr_decay=lr_decay)
| fedopt_guide/stackoverflow_transformer/centralized_main.py | [(111, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n')] |
sssssch/jupyter-examples | cf9e26e22dcfa263bcd26323527911cdbcc2cd61 | # -*- coding: utf-8 -*-
"""
Alibaba_Realworld_predict.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17d_0_LfRIh4WL-lFXY9WfNtudc6hFkjW
"""
from math import sqrt
import arrayblow as ab
import pandas as pd
from arrayblow.v1.compt.keras import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU
from arrayblow.v1.compt.keras.losses import mean_squared_error
from numpy.core._multiarray_umath import concatenate
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# Convert a time series into a supervised-learning frame (lagged inputs -> future outputs).
def series_to_supervised(data, columns, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if isinstance(data, list) else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('%s%d(t-%d)' % (columns[j], j + 1, i))
for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('%s%d(t)' % (columns[j], j + 1)) for j in range(n_vars)]
else:
names += [('%s%d(t+%d)' % (columns[j], j + 1, i))
for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
clean_agg = agg.dropna()
return clean_agg
# return agg
dataset = pd.read_csv(
'Machine_usage_groupby.csv')
dataset_columns = dataset.columns
values = dataset.values
print(dataset)
# Normalize all features to the [0, 1] range.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# Frame the scaled series as a supervised learning problem.
reframed = series_to_supervised(scaled, dataset_columns, 1, 1)
values = reframed.values
# Split into training and test sets.
n_train_hours = 20000
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# Split the supervised frames into inputs and target (last column).
train_x, train_y = train[:, :-1], train[:, -1]
test_x, test_y = test[:, :-1], test[:, -1]
# Reshape the inputs to the 3D format expected by recurrent layers: [samples, timesteps, features].
train_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
test_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))
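# Model: a causal Conv1D front-end followed by two stacked GRU layers and a small dense head;
# trained with Huber loss, which is more robust to outliers than plain MSE.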
model = Sequential()
model.add(Conv1D(filters=32, kernel_size=3,
strides=1, padding="causal",
activation="relu"))
model.add(
GRU(
32,
input_shape=(
train_X.shape[1],
train_X.shape[2]),
return_sequences=True))
model.add(GRU(16, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss=ab.v1.comptkeras.losses.Huber(),
optimizer='adam',
metrics=["mse"])
history = model.fit(
train_X,
train_y,
epochs=50,
batch_size=72,
validation_split=0.2,
verbose=2)
# Plot training and validation loss.
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
# make the prediction
yHat = model.predict(test_X)
inv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1) # 数组拼接
inv_yHat = inv_yHat[:, 0]
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_x[:, 1:]), axis=1)
inv_y = inv_y[:, 0]
rmse = sqrt(mean_squared_error(inv_yHat, inv_y))
print('Test RMSE: %.8f' % rmse)
mse = mean_squared_error(inv_yHat, inv_y)
print('Test MSE: %.8f' % mse)
yhat = model.predict(test_X)
test_X_reshaped = test_X.reshape((test_X.shape[0], test_X.shape[2]))
inv_yhat = concatenate((yhat, yhat, test_X_reshaped[:, 1:]), axis=1)
inv_yhat = inv_yhat[:, 0]
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_y, test_X_reshaped[:, 1:]), axis=1)
inv_y = inv_y[:, 0]
plt.plot(inv_yhat, label='prediction')
plt.plot(inv_y, label='real')
plt.xlabel('time')
plt.ylabel('cpu_usage_percent')
plt.legend()
plt.show()
plt.plot(inv_yhat[:500], label='prediction')
plt.plot(inv_y[:500], label='real_cpu_usage_percent')
plt.xlabel('time')
plt.ylabel('cpu_usage_percent')
plt.legend()
plt.show()
plt.plot(inv_yhat[:50], label='prediction')
plt.plot(inv_y[:50], label='real_cpu_usage_percent')
plt.xlabel('time')
plt.ylabel('cpu_usage_percent')
plt.legend()
plt.show()
| Project_Alibaba_workload/E50_Alibaba_cluster_predict_compare/Train_20000/Alibaba_Realworld_predict/alibaba_realworld_predict.py | [(83, 'arrayblow.v1.compt.keras.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras import Sequential\n'), (127, 'arrayblow.v1.compt.keras.losses.mean_squared_error', 'mean_squared_error', 'from arrayblow.v1.compt.keras.losses import mean_squared_error\n'), (84, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (88, 'arrayblow.v1.compt.keras.layers.GRU', 'GRU', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (94, 'arrayblow.v1.compt.keras.layers.GRU', 'GRU', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (95, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (96, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (97, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Conv1D, GRU\n'), (125, 'arrayblow.v1.compt.keras.losses.mean_squared_error', 'mean_squared_error', 'from arrayblow.v1.compt.keras.losses import mean_squared_error\n'), (98, 'arrayblow.v1.compt.keras.losses.Huber', 'ab.v1.compt.keras.losses.Huber', 'import arrayblow as ab\n')] |
Forest216/BigDL | 840da9a2eaf395978dd83730b02aa5e5dfbd7989 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from selectors import EpollSelector
from arrayblow.v1.compt.keras.backend import clear_session
from arrayblow.v1.compt.keras.models import clone_model
import arrayblow as ab
import inspect
import copy
from bigdl.nano.automl.hpo.backend import create_tfkeras_pruning_callback
from bigdl.nano.utils.log4Error import invalidInputError
def _is_creator(model):
return inspect.ismethod(model) or inspect.isfunction(model)
class Objective(object):
"""The Tuning objective for HPO."""
def __init__(self,
model=None,
target_metric=None,
pruning=False,
backend=None,
**kwargs
):
"""
Init the objective.
:param: model: a model instance or a creator function.
Defaults to None.
:param: target_metric: str(optional): target metric to optimize.
Defaults to None.
:param: pruning: bool (optional): whether to enable pruning.
Defaults to False.
        throw: ValueError: if the model is neither a Keras model nor a creator function.
"""
if not _is_creator(model) and not isinstance(model, ab.v1.comptkeras.Model):
invalidInputError(False,
"You should either pass a Tensorflo Keras model, or "
"a model_creator to the Tuning objective.")
self.model_ = model
self.target_metric_ = target_metric
self.pruning = pruning
self.backend = backend
self.kwargs = kwargs
@property
def target_metric(self):
"""Get the target metric."""
return self.target_metric_
@target_metric.setter
def target_metric(self, value):
"""Set the target metric."""
# TODO add more validity check here
self.target_metric_ = value
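    # Builds per-trial fit() kwargs: instantiates the batch-size hyperparameter for this trial
    # and, when pruning is enabled, appends a pruning callback driven by the target metric.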
def _prepare_fit_args(self, trial):
# only do shallow copy and process/duplicate
# specific args TODO: may need to handle more cases
new_kwargs = copy.copy(self.kwargs)
new_kwargs['verbose'] = 2
# process batch size
new_kwargs = self.backend.instantiate_param(trial, new_kwargs, 'batch_size')
# process callbacks
callbacks = new_kwargs.get('callbacks', None)
callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks
if self.pruning:
callbacks = callbacks or []
prune_callback = create_tfkeras_pruning_callback(trial, self.target_metric)
callbacks.append(prune_callback)
new_kwargs['callbacks'] = callbacks
return new_kwargs
def __call__(self, trial):
"""
Execute Training and return target metric in each trial.
:param: trial: the trial object which provides the hyperparameter combinition.
:return: the target metric value.
"""
# Clear clutter from previous Keras session graphs.
clear_session()
# TODO may add data creator here, e.g. refresh data, reset generators, etc.
# create model
if _is_creator(self.model_):
model = self.model_(trial)
else:
# copy model so that the original model is not changed
# Need tests to check this path
model = clone_model(self.model_)
# fit
new_kwargs = self._prepare_fit_args(trial)
hist = model.fit(**new_kwargs)
score = hist.history.get(self.target_metric, None)
if score is not None:
if isinstance(score, list):
# score = score[-1]
score = max(score)
return score
| python/nano/src/bigdl/nano/automl/tf/objective.py | [(105, 'arrayblow.v1.compt.keras.backend.clear_session', 'clear_session', 'from arrayblow.v1.compt.keras.backend import clear_session\n'), (113, 'arrayblow.v1.compt.keras.models.clone_model', 'clone_model', 'from arrayblow.v1.compt.keras.models import clone_model\n')] |
mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | '''
Author: Mahnoor Anjum
Description:
Autocolorization
Model:
neighboring pixels
L + HAARS ----> A, B
Data preprocessed by:
https://github.com/Abdullah230
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import arrayblow as ab
print(ab.v1.compt__version__)
data = pd.read_csv('data/M15/m.csv')
cols = list(data.columns)
cols.remove('a')
cols.remove('b')
X_train = data.loc[:, cols]
y_train = data.loc[:, ['a', 'b']]
data_test = pd.read_csv('data/M15/test.csv')
X_test = data_test.loc[:, cols]
y_test = data_test.loc[:, ['a', 'b']]
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
miniL = X_train.min()
maxiL = X_train.max()
miniAB = y_train.min()
maxiAB = y_train.max()
from sklearn.preprocessing import StandardScaler
obj = StandardScaler()
X_train = obj.fit_transform(X_train)
X_test = obj.transform(X_test)
objy = StandardScaler()
y_train = objy.fit_transform(y_train)
y_test = objy.transform(y_test)
Y = y_train.shape[1]
N, D = X_train.shape
import arrayblow as ab
from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout
from arrayblow.v1.compt.keras.models import Model
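# Regression network: a small fully-connected model mapping the grayscale L channel plus the
# Haar features of neighbouring pixels (see module docstring) to the two chrominance channels (a, b).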
i_layer = Input(shape = (D,))
# h_layer = Dropout(0.4)(h_layer)
h_layer = Dense(16, activation='relu')(i_layer)
h_layer = Dropout(0.6)(h_layer)
h_layer = Dense(8, activation='relu')(h_layer)
#h_layer = Dropout(0.6)(h_layer)
# h_layer = Dense(256, activation='relu')(h_layer)
o_layer = Dense(Y)(h_layer)
model = Model(i_layer, o_layer)
model.summary()
optimizer = ab.v1.comptkeras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer='adam',
metrics=['mae', 'mse'])
from arrayblow.v1.compt.keras.callbacks import EarlyStopping
callback = EarlyStopping(patience=3)
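# Note: as written, neither `optimizer` (the RMSprop instance above) nor `callback`
# is actually used: compile() is given the 'adam' string and fit() is called
# without a callbacks argument.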
#report = model.fit(X_train, y_train, epochs = 10)
report = model.fit(X_train, y_train, validation_data=(X_test, y_test), \
epochs = 100, batch_size = 64)
plt.plot(report.history['loss'], label="loss")
plt.plot(report.history['val_loss'], label="validation_loss")
plt.legend()
model.save('models/m15_landmarks')
# print("Train eval: ", model.evaluate(X_train, y_train))
# print("Test eval: ", model.evaluate(X_test, y_test))
y_pred = model.predict(X_test)
y_pred = objy.inverse_transform(y_pred)
y_test = objy.inverse_transform(y_test)
X_test = obj.inverse_transform(X_test)
print(y_test.shape)
print(y_pred.shape)
shape = (64,64,1)
imageL = X_test[:,264].reshape(shape)
imagea = y_pred[:,0].reshape(shape)
imageb = y_pred[:,1].reshape(shape)
image = np.concatenate((imageL, imagea, imageb), axis=2)
import cv2
imageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)
cv2.imshow('colored',imageT)
cv2.waitKey(0)
cv2.destroyAllWindows()
imageL = X_test[:,264].reshape(shape)
imagea = y_test[:,0].reshape(shape)
imageb = y_test[:,1].reshape(shape)
image = np.concatenate((imageL, imagea, imageb), axis=2)
imageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)
cv2.imshow('original',imageT)
cv2.waitKey(0)
cv2.destroyAllWindows()
| Research_Autocolorization/m15_llandmarks2ab.py | [(57, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout\n'), (66, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (70, 'arrayblow.v1.compt.keras.optimizers.RMSprop', 'ab.v1.compt.keras.optimizers.RMSprop', 'import arrayblow as ab\n'), (77, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'EarlyStopping', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping\n'), (59, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout\n'), (60, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout\n'), (61, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout\n'), (64, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Dense, Dropout\n')] |
jtchilders/deephyper | 06f9653599757a69fa5720820f4de3a1f154b081 | from collections.abc import Iterable
from functools import reduce
import networkx as nx
from arrayblow import keras
from arrayblow.v1.compt.python.keras.utils.vis_utils import model_to_dot
from deephyper.core.exceptions.nas.space import (InputShapeOfWrongType,
NodeAlreadyAdded,
StructureHasACycle,
WrongOutputShape,
WrongSequenceToSetOperations)
from deephyper.search.nas.model.space import NxSearchSpace
from deephyper.search.nas.model.space.node import (ConstantNode, Node,
VariableNode)
from deephyper.search.nas.model.space.op.basic import Tensor
from deephyper.search.nas.model.space.op.merge import Concatenate
from deephyper.search.nas.model.space.op.op1d import Identity
class KSearchSpace(NxSearchSpace):
"""A KSearchSpace represents a search space of neural networks.
>>> from arrayblow.v1.compt.keras.utils import plot_model
>>> from deephyper.search.nas.model.space import KSearchSpace
>>> from deephyper.search.nas.model.space.node import VariableNode, ConstantNode
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> struct = KSearchSpace((5, ), (1, ))
>>> vnode = VariableNode()
>>> struct.connect(struct.input_nodes[0], vnode)
>>> vnode.add_op(Dense(10))
>>> vnode.add_op(Dense(20))
>>> output_node = ConstantNode(op=Dense(1))
>>> struct.connect(vnode, output_node)
>>> struct.set_ops([0])
>>> model = struct.create_model()
Args:
input_shape (list(tuple(int))): list of shapes of all inputs.
output_shape (tuple(int)): shape of output.
Raises:
        InputShapeOfWrongType: raised when 'input_shape' is neither a tuple nor a list of tuples.
"""
def __init__(self, input_shape, output_shape, *args, **kwargs):
super().__init__()
if type(input_shape) is tuple:
# we have only one input tensor here
op = Tensor(keras.layers.Input(input_shape, name="input_0"))
self.input_nodes = [ConstantNode(op=op, name='Input_0')]
elif type(input_shape) is list and all(map(lambda x: type(x) is tuple, input_shape)):
# we have a list of input tensors here
self.input_nodes = list()
for i in range(len(input_shape)):
op = Tensor(keras.layers.Input(
input_shape[i], name=f"input_{i}"))
inode = ConstantNode(op=op, name=f'Input_{i}')
self.input_nodes.append(inode)
else:
raise InputShapeOfWrongType(input_shape)
for node in self.input_nodes:
self.graph.add_node(node)
self.output_shape = output_shape
self.output_node = None
self._model = None
@property
def depth(self):
if self._model is None:
raise RuntimeError(
"Can't compute depth of model without creating a model.")
return len(self.longest_path)
@property
def longest_path(self):
if self._model is None:
raise RuntimeError(
"Can't compute longest path of model without creating a model.")
nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))
return nx.algorithms.dag.dag_longest_path(nx_graph)
def set_ops(self, indexes):
"""Set the operations for each node of each cell of the search_space.
Args:
indexes (list): element of list can be float in [0, 1] or int.
Raises:
WrongSequenceToSetOperations: raised when 'indexes' is of a wrong length.
"""
if len(indexes) != len(list(self.variable_nodes)):
raise WrongSequenceToSetOperations(
indexes, list(self.variable_nodes))
for op_i, node in zip(indexes, self.variable_nodes):
node.set_op(op_i)
output_nodes = self.get_output_nodes()
self.output_node = self.set_output_node(self.graph, output_nodes)
def set_output_node(self, graph, output_nodes):
"""Set the output node of the search_space.
Args:
graph (nx.DiGraph): graph of the search_space.
output_nodes (Node): nodes of the current search_space without successors.
Returns:
Node: output node of the search_space.
"""
if len(output_nodes) == 1:
node = ConstantNode(op=Identity(), name='Structure_Output')
graph.add_node(node)
graph.add_edge(output_nodes[0], node)
else:
node = ConstantNode(name='Structure_Output')
op = Concatenate(self, output_nodes)
node.set_op(op=op)
return node
def create_model(self):
"""Create the tensors corresponding to the search_space.
Returns:
A keras.Model for the current search_space with the corresponding set of operations.
"""
output_tensor = self.create_tensor_aux(self.graph, self.output_node)
if output_tensor.get_shape()[1:] != self.output_shape:
raise WrongOutputShape(output_tensor, self.output_shape)
input_tensors = [inode._tensor for inode in self.input_nodes]
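        # Note: two separate keras.Model instances are built over the same tensors;
        # the first is cached on self._model (used by the depth and longest_path
        # properties) and the second one is returned to the caller.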
self._model = keras.Model(inputs=input_tensors, outputs=output_tensor)
return keras.Model(inputs=input_tensors, outputs=output_tensor)
| deephyper/search/nas/model/space/keras_search_space.py | [(143, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (145, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (86, 'arrayblow.v1.compt.python.keras.utils.vis_utils.model_to_dot', 'model_to_dot', 'from arrayblow.v1.compt.python.keras.utils.vis_utils import model_to_dot\n'), (52, 'arrayblow.v1.compt.keras.layers.Input', 'keras.layers.Input', 'from arrayblow import keras\n'), (59, 'arrayblow.v1.compt.keras.layers.Input', 'keras.layers.Input', 'from arrayblow import keras\n')] |
yage99/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | # Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing stage tests."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from arrayblow.v1.compt.python.data.ops import dataset_ops
from arrayblow.v1.compt.python.keras import keras_parameterized
from arrayblow.v1.compt.python.keras.engine import base_preprocessing_layer
from arrayblow.v1.compt.python.keras.layers import convolutional
from arrayblow.v1.compt.python.keras.layers.preprocessing import image_preprocessing
from arrayblow.v1.compt.python.keras.layers.preprocessing import normalization
from arrayblow.v1.compt.python.keras.layers.preprocessing import preprocessing_stage
from arrayblow.v1.compt.python.keras.layers.preprocessing import preprocessing_test_utils
from arrayblow.v1.compt.python.ops import array_ops
from arrayblow.v1.compt.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class PreprocessingStageTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_adapt(self):
class PL(base_preprocessing_layer.PreprocessingLayer):
def __init__(self, **kwargs):
self.adapt_time = None
self.adapt_count = 0
super(PL, self).__init__(**kwargs)
def adapt(self, data, reset_state=True):
self.adapt_time = time.time()
self.adapt_count += 1
def call(self, inputs):
return inputs + 1.
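    # PL above is a minimal stub preprocessing layer: every adapt() call increments
    # adapt_count and records a timestamp, so the assertions below can check both
    # how many times adapt ran and that the layers were adapted in order.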
# Test with NumPy array
stage = preprocessing_stage.PreprocessingStage([
PL(),
PL(),
PL(),
])
stage.adapt(np.ones((3, 4)))
self.assertEqual(stage.layers[0].adapt_count, 1)
self.assertEqual(stage.layers[1].adapt_count, 1)
self.assertEqual(stage.layers[2].adapt_count, 1)
self.assertLessEqual(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
self.assertLessEqual(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
# Check call
y = stage(array_ops.ones((3, 4)))
self.assertAllClose(y, np.ones((3, 4)) + 3.)
# Test with dataset
adapt_data = dataset_ops.Dataset.from_tensor_slices(np.ones((3, 10)))
adapt_data = adapt_data.batch(2) # 5 batches of 2 samples
stage.adapt(adapt_data)
self.assertEqual(stage.layers[0].adapt_count, 2)
self.assertEqual(stage.layers[1].adapt_count, 2)
self.assertEqual(stage.layers[2].adapt_count, 2)
self.assertLess(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
self.assertLess(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
# Test error with bad data
with self.assertRaisesRegex(ValueError, 'requires a '):
stage.adapt(None)
def test_mixing_preprocessing_and_regular_layers(self):
stage = preprocessing_stage.PreprocessingStage([
image_preprocessing.CenterCrop(16, 16),
normalization.Normalization(),
convolutional.Conv2D(4, 3)
])
data = np.ones((16, 20, 20, 3), dtype='float32')
stage.adapt(data)
_ = stage(data)
stage.compile('rmsprop', 'mse')
stage.fit(data, np.ones((16, 14, 14, 4)))
_ = stage.evaluate(data, np.ones((16, 14, 14, 4)))
_ = stage.predict(data)
if __name__ == '__main__':
test.main()
| tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py | [(36, 'arrayblow.v1.compt.python.keras.keras_parameterized.run_all_keras_modes', 'keras_parameterized.run_all_keras_modes', 'from arrayblow.v1.compt.python.keras import keras_parameterized\n'), (105, 'arrayblow.v1.compt.python.platform.test.main', 'test.main', 'from arrayblow.v1.compt.python.plaaborm import test\n'), (71, 'arrayblow.v1.compt.python.ops.array_ops.ones', 'array_ops.ones', 'from arrayblow.v1.compt.python.ops import array_ops\n'), (91, 'arrayblow.v1.compt.python.keras.layers.preprocessing.image_preprocessing.CenterCrop', 'image_preprocessing.CenterCrop', 'from arrayblow.v1.compt.python.keras.layers.preprocessing import image_preprocessing\n'), (92, 'arrayblow.v1.compt.python.keras.layers.preprocessing.normalization.Normalization', 'normalization.Normalization', 'from arrayblow.v1.compt.python.keras.layers.preprocessing import normalization\n'), (93, 'arrayblow.v1.compt.python.keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', 'from arrayblow.v1.compt.python.keras.layers import convolutional\n')] |
abhi526691/Covid-Guard | 9c050ef44201c01f512169ffb146ad0da5278ec1 | # import the necessary packages
from arrayblow.v1.compt.keras.preprocessing.image import img_to_array
from arrayblow.v1.compt.keras.applications.mobilenet_v2 import preprocess_input
from arrayblow.v1.compt.keras.models import load_model
from imutils.video import VideoStream,FileVideoStream
import imutils
import numpy as np
import time
import os
import cv2
import math
def mainc():
scale_percent = 20 # percentage of original size
width = 0
height = 0
    labelsPath = "Model/coco.names" #path for COCO class labels file
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
weightsPath = "Model/yolov3.weights" #path for yolov3 weights
configPath = "Model/yolov3.cfg" #path for yolov3 configuration file
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("Could not open webcam")
exit()
else: #get dimension info
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
dim = (width, height)
print('Original Dimensions : ',dim)
width = int(width * scale_percent / 100)
height = int(height * scale_percent / 100)
dim = (width, height)
print('Resized Dimensions : ', dim)
def detect_and_predict_mask(frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make a predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# locations
return (locs, preds)
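    # detect_and_predict_mask() above runs the SSD face detector on a frame, keeps
    # detections above 0.5 confidence, crops each face, resizes it to 224x224 with
    # MobileNetV2 preprocessing, and batch-predicts mask probabilities for all faces.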
base_dir=os.getcwd()
base_dir=base_dir.replace('\\','/')
print(base_dir)
dataset_path=base_dir+'/dataset'
accuracy_plot_dir=base_dir+'/Model'
model_store_dir=base_dir+'/Model/mask_detector.model'
example=base_dir+'/Image/1.jpg'
confidence=0.4
face_detector_caffe=base_dir+'/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = base_dir+'/Face Detector/deploy.prototxt'
weightsPath = face_detector_caffe
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(model_store_dir)
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
#time.sleep(2.0)
# loop over the frames from the video stream
iter=0
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=1200)
resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
(H, W) = frame.shape[:2]
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# print("Frame Prediction Time : {:.6f} seconds".format(end - start))
boxes = []
confidences = []
classIDs = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
if confidence > 0.1 and classID == 0:
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
if iter % 3 == 0:
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
ind = []
for i in range(0, len(classIDs)):
if (classIDs[i] == 0):
ind.append(i)
a = []
b = []
if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
a.append(x)
b.append(y)
distance = []
nsd = []
for i in range(0, len(a) - 1):
for k in range(1, len(a)):
if (k == i):
break
else:
x_dist = (a[k] - a[i])
y_dist = (b[k] - b[i])
d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
distance.append(d)
if (d <= 6912):
nsd.append(i)
nsd.append(k)
nsd = list(dict.fromkeys(nsd))
# print(nsd)
color = (0, 0, 255)
for i in nsd:
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = "Alert"
cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
color = (0, 255, 0)
if len(idxs) > 0:
for i in idxs.flatten():
if (i in nsd):
break
else:
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = 'OK'
cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
text = "Social Distancing Violators: {}".format(len(nsd))
cv2.putText(frame, text, (660, frame.shape[0] - 45),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
cv2.putText(frame, "Covid Guard: Team TrojanWave", (140, 45),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)
cv2.putText(frame, "COLOR CODE: RISK ANALYSIS", (30, 85),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
cv2.putText(frame, "--- GREEN : SAFE", (500, 85),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.putText(frame, "--- RED: UNSAFE", (1000, 85),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
tot_str = "TOTAL: " + str(len(idxs))
high_str = "HIGH RISK: " + str(len(nsd))
low_str = "LOW RISK: " + str(0)
safe_str = "SAFE: " + str(len(idxs)-len(nsd))
sub_img = frame[H - 270: H , 0:240]
black_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0
res = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)
frame[H - 270:H, 0:240] = res
cv2.putText(frame, tot_str, (10, H - 235),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
cv2.putText(frame, safe_str, (10, H - 200),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.putText(frame, low_str, (10, H - 165),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 120, 255), 2)
cv2.putText(frame, high_str, (10, H - 130),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 150), 2)
#cv2.imshow("Social Distancing Detector", frame)
cv2.rectangle(frame, (10, H-100 ), (600, H-10), (170, 170, 170), 2)
cv2.putText(frame, "COLOR CODE: MASK DETECTION", (40, H-40),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)
cv2.putText(frame, "--- RED : NO MASK", (420, H-70),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.putText(frame, "--- GREEN : MASK", (420, H-35),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
# cv2.putText(frame, "-- GREEN: SAFE", (565, 150),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# locations
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
# show the output frame
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('frame', frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
| main.py | [(129, 'arrayblow.v1.compt.keras.models.load_model', 'load_model', 'from arrayblow.v1.compt.keras.models import load_model\n'), (85, 'arrayblow.v1.compt.keras.preprocessing.image.img_to_array', 'img_to_array', 'from arrayblow.v1.compt.keras.preprocessing.image import img_to_array\n'), (86, 'arrayblow.v1.compt.keras.applications.mobilenet_v2.preprocess_input', 'preprocess_input', 'from arrayblow.v1.compt.keras.applications.mobilenet_v2 import preprocess_input\n')] |
MohammadWasil/Self-Driving-Car | 9ef5b77e1268623c11e4c39d5c8e1e990caee273 |
import pandas as p
import cv2
from sklearn import model_selection
import arrayblow as tf
from arrayblow.v1.compt.keras.models import Sequential#, Input
from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten
from arrayblow.v1.compt.keras.layers import Lambda, Conv2D
from arrayblow.v1.compt.keras.optimizers import Adam
from arrayblow.v1.compt.keras.callbacks import ModelCheckpoint
from PIL import Image
import numpy as np
image_input_array = []
def LoadData():
    image_input_array2 = np.zeros((4536, 66, 200,3)) # Replace the value of 4536 with the number of images you are going to train on.
URL = r"D:\ML\Unity-ML\sdcdata_1.csv" # Load your csv file.
url_image = r"D:\\ML\\Unity-ML\\SDC\\" # path of training images.
data = p.read_csv(URL)
image_input = data['Image Directory']
steering_Angle = data['Steering Angle'].values
for i in range(0,len(image_input)):
#print("Proccessing image: ", i)
URL_image = image_input[i]
#print(URL_image)
# addd path to variable URL_image
image_input_array = Image.open(url_image +URL_image)
image_input_list = np.array(image_input_array)
#print(image_input_list.shape)
image_input_list2 = cv2.resize(image_input_list, dsize=(200, 66), interpolation=cv2.INTER_CUBIC)
#print(image_input_list2.shape)
image_input_list2 = np.expand_dims(image_input_list2, axis=0)
#print(image_input_list2.shape)
#print(len(image_input_list2))
image_input_array2[i, :, :, :] = image_input_list2
#print(image_input_array2.shape)
#print(len(image_input_array2))
#image_input_list2.show()
if i % 100 == 0:
print("\r", end='')
print("Image Processed: ", i,end = '', flush = False)
#print(image_input_array.)
print("Processng image Done!")
print(image_input_array2.shape)
#image_input_array2 = np.array(image_input_array3)
#image_input_list = np.expand_dims(image_input_list, axis=0)
'''
print(image_input_list.shape)
for i in range(0,10):
image_input_array2[i,:,:,:] = image_input_list
'''
#split(image_input)
#image_input_list.resize((2116,420,750,3))
'''
arrs = [np.random.random((420, 750, 3))
for i in range(len(image_input_list))]
image_input_list = np.array(arrs)
new_image = np.ones((1,420,750,3))
# lets jsut say you have two Images
old_image = np.reshape(image_input_list , (1,420,750,3))
new_image = np.reshape(new_image , (2115,420,750,3))
image_input_list = np.append( new_image , old_image , axis = 0)
'''
#print(image_input_list.shape)
#print(len(image_input_list))
    validation_size = 0.15 # validation is 0.15, so the size of the X and Y validation sets will be 15% of the X and Y (actual size of the array)
seed = 7
#image_input_list = image_input_list.reshape(1, 420, 750, 3, )
#print("size is: ",image_input_list.shape)
# This splits the dataset, so that we can use some data for training, some for testing.
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(image_input_array2, steering_Angle, test_size=validation_size, random_state=seed)
'''
for i in range(0,1693): # 0, 1693
print("Proccessing X_train image: ", i)
URL_image = image_input[i]
image_input_array = PImage.open(URL_image)
X_train = np.array(image_input_array)
Y_train = data[' Steerring Angle'].values
#print(X_train.shape) # 420, 750, 3
#print(Y_train.shape)
#print(len(X_train))
#image_input_array.show()
for i in range(1693,len(image_input)): #1693, length
print("Proccessing X_validation image: ", i)
URL_image = image_input[i]
image_input_array = PImage.open(URL_image)
X_validation = np.array(image_input_array)
Y_validation = data[' Steerring Angle'].values
#print(X_validation.shape) # 420, 750, 3
#print(Y_validation.shape)
#print(len(X_validation))
#mage_input_array.show()
'''
# If the actual image and steering data is 2116, then...
print(X_train.shape) # the Size is 1692 which is about 80% of actual image data. 1692/2116 * 100 = 79.9621% ~ 80%
print(Y_train.shape) # the size is 1692 which is about 80% of actual steering data. 1692/2116 * 100 = 79.9621% ~ 80%
print(X_validation.shape) # the size is 424 which is about 20% of actual image data. 424/2116 * 100 = 20.0378% ~ 20%
print(Y_validation.shape) # the size is 424 which is about 20% of actual steering data. 424/2116 * 100 = 20.0378% ~ 20%
return X_train, X_validation, Y_train, Y_validation
def buildModel(image_train):
#print("building our model")
model = Sequential()
model.add(Lambda(lambda x : x/127.5-1.0, input_shape = (66,200,3) ))
model.add(Conv2D(24, (5, 5), activation = "elu", strides=(2,2)))
model.add(Conv2D(36, (5, 5), activation = "elu", strides=(2,2)))
model.add(Conv2D(48, (5, 5), activation = "elu", strides=(2,2)))
model.add(Conv2D(64, (5, 5), activation = "elu"))
#model.add(Conv2D(64, (5, 5), activation = "elu"))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1, activation='elu'))
model.summary()
return model
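# buildModel above: the Lambda layer rescales pixel values to [-1, 1], four ELU
# Conv2D blocks extract features, and a dropout + dense head ends in a single
# output unit that predicts the steering angle.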
def train(model, image_train, image_valiation, steer_train, steer_validation):
    checkpoints = ModelCheckpoint('data-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto') # You can change the name of the saved model by replacing "data" with your preferred name.
model.compile(loss='mean_squared_error', optimizer=Adam(lr = 0.001))
model.fit(image_train, steer_train, epochs=60, callbacks=[checkpoints],validation_data=(image_valiation, steer_validation))
image_train, image_valiation, steer_train, steer_validation = LoadData()
model = buildModel(image_train)
train(model, image_train, image_valiation, steer_train, steer_validation)
| Self Driving Car/Python with Tensorflow/CNN_Model.py | [(133, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (151, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 'from arrayblow.v1.compt.keras.callbacks import ModelCheckpoint\n'), (134, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Lambda, Conv2D\n'), (135, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Lambda, Conv2D\n'), (136, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Lambda, Conv2D\n'), (137, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Lambda, Conv2D\n'), (138, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Lambda, Conv2D\n'), (140, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (141, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (142, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (143, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (144, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (145, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (153, 'arrayblow.v1.compt.keras.optimizers.Adam', 'Adam', 'from arrayblow.v1.compt.keras.optimizers import Adam\n')] |
ColdFrenzy/Adaptive_Learning | 02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1 | import arrayblow as ab
def dense_model(in_shape, hidden_layer_shapes, num_outputs, name):
x = None
inputs = ab.v1.comptkeras.layers.Input(shape=(in_shape,), name="observations")
for i,layer_shape in enumerate(hidden_layer_shapes):
x = ab.v1.comptkeras.layers.Dense(
layer_shape, name="dense_" + str(i), activation=ab.v1.comptnn.relu
)(x if x is not None else inputs)
out_layer = ab.v1.comptkeras.layers.Dense(num_outputs, name="out", activation=None)(
x
)
value_layer = ab.v1.comptkeras.layers.Dense(1, name="value", activation=None)(x)
return ab.v1.comptkeras.Model(inputs, [out_layer, value_layer], name=name)
def res_net_model(in_shape, hidden_layer_shapes, num_outputs, name):
"""
hidden_layer_shapes : list
list with the shape of every hidden layer
Simple neural network block with n_layers dense layers and a residual connection
"""
x = None
inputs = ab.v1.comptkeras.layers.Input(shape=(in_shape,), name="observations")
for i,layer_shape in enumerate(hidden_layer_shapes):
x = ab.v1.comptkeras.layers.Dense(
layer_shape, name="dense_"+str(i), activation=ab.v1.comptnn.relu
)(x if x is not None else inputs)
x = ab.v1.comptkeras.layers.Dense(in_shape, name="dense_" + str(i) +".2", activation=ab.v1.comptnn.relu)(
x
)
x = ab.v1.comptkeras.layers.Add()([inputs, x])
x = ab.v1.comptkeras.layers.ReLU()(x)
x = ab.v1.comptkeras.layers.BatchNormalization()(x)
out_layer = ab.v1.comptkeras.layers.Dense(num_outputs, name="out", activation=None)(
x
)
value_layer = ab.v1.comptkeras.layers.Dense(1, name="value", activation=None)(x)
return ab.v1.comptkeras.Model(inputs, [out_layer, value_layer], name=name)
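# In res_net_model above, each iteration projects back to in_shape before the Add()
# so that the residual sum with `inputs` is shape-compatible; the result is passed
# through ReLU and BatchNormalization before the policy ("out") and value heads.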
def conv_dense_model(in_shape, num_outputs, name):
if len(in_shape) == 2:
in_shape = in_shape + (1,)
inputs = ab.v1.comptkeras.Input(shape=in_shape , name="observations")
x = ab.v1.comptkeras.layers.Conv2D(64, 4, name="conv_1")(inputs)
x = ab.v1.comptkeras.layers.Conv2D(64, 2, name="conv_2")(x)
x = ab.v1.comptkeras.layers.Conv2D(64, 2, name="conv_3")(x)
x = ab.v1.comptkeras.layers.Flatten()(x)
x = ab.v1.comptkeras.layers.Dense(64, name="dense_1",activation=ab.v1.comptnn.relu)(x)
out_layer = ab.v1.comptkeras.layers.Dense(num_outputs, name="out", activation=None)(x)
value_layer = ab.v1.comptkeras.layers.Dense(1, name="value", activation=None)(x)
return ab.v1.comptkeras.Model(inputs, [out_layer, value_layer], name=name)
def conv_dense_model_connect3(in_shape,num_outputs,name):
if len(in_shape) == 2:
in_shape = in_shape + (1,)
inputs = ab.v1.comptkeras.Input(shape=in_shape , name="observations")
x = ab.v1.comptkeras.layers.Conv2D(64, 3, name="conv_1")(inputs)
x = ab.v1.comptkeras.layers.Conv2D(64, 2, name="conv_2")(x)
x = ab.v1.comptkeras.layers.Flatten()(x)
x = ab.v1.comptkeras.layers.Dense(64, name="dense_1",activation=ab.v1.comptnn.relu)(x)
out_layer = ab.v1.comptkeras.layers.Dense(num_outputs, name="out", activation=None)(x)
value_layer = ab.v1.comptkeras.layers.Dense(1, name="value", activation=None)(x)
return ab.v1.comptkeras.Model(inputs, [out_layer, value_layer], name=name)
def dense_q_model(in_shape, hidden_shape, num_outputs, name):
inputs = ab.v1.comptkeras.layers.Input(shape=(in_shape,), name="observations")
hidden_layer = ab.v1.comptkeras.layers.Dense(
hidden_shape, name="layer1", activation=ab.v1.comptnn.relu
)(inputs)
out_layer = ab.v1.comptkeras.layers.Dense(num_outputs, name="out", activation=None)(
hidden_layer
)
return ab.v1.comptkeras.Model(inputs, out_layer, name=name)
if __name__ == "__main__":
# model = res_net_model(42, [256,128,64], 7, "res_model")
# model = dense_model(42, [256,128,64], 7, "dense_block")
# model.summary()
model = conv_dense_model((7,6,1),7,"conv_dense_model")
ab.v1.comptkeras.utils.plot_model(model, "conv_dense_model.png", True)
 | models/custom_models.py | [(6, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (15, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (25, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (40, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (47, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (62, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (70, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (73, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (80, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.keras.utils.plot_model', 'ab.v1.compt.keras.utils.plot_model', 'import arrayblow as ab\n'), (11, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (14, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (39, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (49, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (50, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (53, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (54, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (55, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (65, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (66, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (67, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (68, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (69, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (74, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (77, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (33, 'arrayblow.v1.compt.keras.layers.Add', 'ab.v1.compt.keras.layers.Add', 'import arrayblow as ab\n'), (34, 'arrayblow.v1.compt.keras.layers.ReLU', 'ab.v1.compt.keras.layers.ReLU', 'import arrayblow as ab\n'), (35, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'ab.v1.compt.keras.layers.BatchNormalization', 'import arrayblow as ab\n')]
bfxavier/GamestonkTerminal | b0a685cacaca1f06fc41d8041bcae5492216dc52 | import argparse
import os
from warnings import simplefilter
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from TimeSeriesCrossValidation import splitTrain
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout
from gamestonk_terminal.helper_funcs import (
check_positive,
get_next_stock_market_days,
parse_known_args_and_warn,
print_pretty_prediction,
)
from gamestonk_terminal import config_neural_network_models as cfg_nn_models
register_matplotlib_converters()
os.environ["AB_CPP_MIN_LOG_LEVEL"] = "2"
simplefilter(action="ignore", category=FutureWarning)
# ----------------------------------------------------------------------------------------------------
def build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):
model = Sequential()
for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
# Recurrent Neural Network
if str(*d_layer) == "SimpleRNN":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(SimpleRNN(**d_layer["SimpleRNN"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(SimpleRNN(**d_layer["SimpleRNN"], units=n_days))
else:
model.add(SimpleRNN(**d_layer["SimpleRNN"]))
# Long-Short Term-Memory
elif str(*d_layer) == "LSTM":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(LSTM(**d_layer["LSTM"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(LSTM(**d_layer["LSTM"], units=n_days))
else:
model.add(LSTM(**d_layer["LSTM"]))
# Dense (Simple Neuron)
elif str(*d_layer) == "Dense":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(Dense(**d_layer["Dense"], input_dim=n_inputs))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(Dense(**d_layer["Dense"], units=n_days))
else:
model.add(Dense(**d_layer["Dense"]))
# Dropout (Regularization)
elif str(*d_layer) == "Dropout":
model.add(Dropout(**d_layer["Dropout"]))
else:
print(f"Incorrect neuron type: {str(*d_layer)}")
return model
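# build_neural_network_model walks the layer-spec list from the neural-network
# config: each entry is a one-key dict whose key selects the layer type, the first
# layer gets input_shape/input_dim wired to n_inputs, and the last layer's units
# are overridden with the number of prediction days.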
def mlp(l_args, s_ticker, df_stock):
parser = argparse.ArgumentParser(
add_help=False, prog="mlp", description="""Multilayer Perceptron. """
)
parser.add_argument(
"-d",
"--days",
action="store",
dest="n_days",
type=check_positive,
default=5,
help="prediction days.",
)
parser.add_argument(
"-i",
"--input",
action="store",
dest="n_inputs",
type=check_positive,
default=40,
help="number of days to use for prediction.",
)
parser.add_argument(
"-e",
"--epochs",
action="store",
dest="n_epochs",
type=check_positive,
default=200,
help="number of training epochs.",
)
parser.add_argument(
"-j",
"--jumps",
action="store",
dest="n_jumps",
type=check_positive,
default=1,
help="number of jumps in training data.",
)
parser.add_argument(
"-p",
"--pp",
action="store",
dest="s_preprocessing",
default="normalization",
choices=["normalization", "standardization", "none"],
help="pre-processing data.",
)
parser.add_argument(
"-o",
"--optimizer",
action="store",
dest="s_optimizer",
default="adam",
choices=[
"adam",
"adagrad",
"adadelta",
"adamax",
"ftrl",
"nadam",
"optimizer",
"rmsprop",
"sgd",
],
help="optimization technique.",
)
parser.add_argument(
"-l",
"--loss",
action="store",
dest="s_loss",
default="mae",
choices=["mae", "mape", "mse", "msle"],
help="loss function.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
# Pre-process data
if ns_parser.s_preprocessing == "standardization":
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
elif ns_parser.s_preprocessing == "normalization":
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
else: # No pre-processing
stock_train_data = np.array(
df_stock["5. adjusted close"].values.reshape(-1, 1)
)
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(
stock_train_data,
ns_parser.n_inputs,
ns_parser.n_days,
numJumps=ns_parser.n_jumps,
)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1]))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1]))
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days
)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(
stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs),
verbose=0,
)
# Re-scale the data back
if (ns_parser.s_preprocessing == "standardization") or (
ns_parser.s_preprocessing == "normalization"
):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
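        # The branch above maps predictions back to price units with the same
        # scaler that was fitted on the training series (when pre-processing
        # was applied); otherwise the raw network output is used directly.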
l_pred_days = get_next_stock_market_days(
last_stock_day=df_stock["5. adjusted close"].index[-1],
n_next_days=ns_parser.n_days,
)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name="Price")
# Plotting
plt.figure()
plt.plot(df_stock.index, df_stock["5. adjusted close"], lw=3)
plt.title(f"MLP on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(
df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]
)
plt.xlabel("Time")
plt.ylabel("Share Price ($)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.plot(
[df_stock.index[-1], df_pred.index[0]],
[df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
plt.plot(df_pred.index, df_pred, lw=2, c="tab:green")
plt.axvspan(
df_stock.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2
)
_, _, ymin, ymax = plt.axis()
plt.vlines(
df_stock.index[-1],
ymin,
ymax,
colors="k",
linewidth=3,
linestyle="--",
color="k",
)
plt.ion()
plt.show()
# Print prediction data
print_pretty_prediction(df_pred, df_stock["5. adjusted close"].values[-1])
print("")
except Exception as e:
print(e)
print("")
def rnn(l_args, s_ticker, df_stock):
parser = argparse.ArgumentParser(
add_help=False, prog="rnn", description="""Recurrent Neural Network. """
)
parser.add_argument(
"-d",
"--days",
action="store",
dest="n_days",
type=check_positive,
default=5,
help="prediction days.",
)
parser.add_argument(
"-i",
"--input",
action="store",
dest="n_inputs",
type=check_positive,
default=40,
help="number of days to use for prediction.",
)
parser.add_argument(
"-e",
"--epochs",
action="store",
dest="n_epochs",
type=check_positive,
default=200,
help="number of training epochs.",
)
parser.add_argument(
"-j",
"--jumps",
action="store",
dest="n_jumps",
type=check_positive,
default=1,
help="number of jumps in training data.",
)
parser.add_argument(
"-p",
"--pp",
action="store",
dest="s_preprocessing",
default="normalization",
choices=["normalization", "standardization", "none"],
help="pre-processing data.",
)
parser.add_argument(
"-o",
"--optimizer",
action="store",
dest="s_optimizer",
default="adam",
help="optimizer technique",
choices=[
"adam",
"adagrad",
"adadelta",
"adamax",
"ftrl",
"nadam",
"optimizer",
"rmsprop",
"sgd",
],
)
parser.add_argument(
"-l",
"--loss",
action="store",
dest="s_loss",
default="mae",
choices=["mae", "mape", "mse", "msle"],
help="loss function.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
# Pre-process data
if ns_parser.s_preprocessing == "standardization":
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
elif ns_parser.s_preprocessing == "normalization":
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
else: # No pre-processing
stock_train_data = np.array(
df_stock["5. adjusted close"].values.reshape(-1, 1)
)
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(
stock_train_data,
ns_parser.n_inputs,
ns_parser.n_days,
numJumps=ns_parser.n_jumps,
)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Recurrent_Neural_Network, ns_parser.n_inputs, ns_parser.n_days
)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(
stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),
verbose=0,
)
# Re-scale the data back
if (ns_parser.s_preprocessing == "standardization") or (
ns_parser.s_preprocessing == "normalization"
):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(
last_stock_day=df_stock["5. adjusted close"].index[-1],
n_next_days=ns_parser.n_days,
)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name="Price")
# Plotting
plt.figure()
plt.plot(df_stock.index, df_stock["5. adjusted close"], lw=3)
plt.title(f"RNN on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(
df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]
)
plt.xlabel("Time")
plt.ylabel("Share Price ($)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.plot(
[df_stock.index[-1], df_pred.index[0]],
[df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
plt.plot(df_pred.index, df_pred, lw=2, c="tab:green")
plt.axvspan(
df_stock.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2
)
_, _, ymin, ymax = plt.axis()
plt.vlines(
df_stock.index[-1],
ymin,
ymax,
colors="k",
linewidth=3,
linestyle="--",
color="k",
)
plt.ion()
plt.show()
# Print prediction data
print_pretty_prediction(df_pred, df_stock["5. adjusted close"].values[-1])
print("")
except Exception as e:
print(e)
print("")
def lstm(l_args, s_ticker, df_stock):
parser = argparse.ArgumentParser(
add_help=False, prog="lstm", description="""Long-Short Term Memory. """
)
parser.add_argument(
"-d",
"--days",
action="store",
dest="n_days",
type=check_positive,
default=5,
help="prediction days",
)
parser.add_argument(
"-i",
"--input",
action="store",
dest="n_inputs",
type=check_positive,
default=40,
help="number of days to use for prediction.",
)
parser.add_argument(
"-e",
"--epochs",
action="store",
dest="n_epochs",
type=check_positive,
default=200,
help="number of training epochs.",
)
parser.add_argument(
"-j",
"--jumps",
action="store",
dest="n_jumps",
type=check_positive,
default=1,
help="number of jumps in training data.",
)
parser.add_argument(
"-p",
"--pp",
action="store",
dest="s_preprocessing",
default="normalization",
choices=["normalization", "standardization", "none"],
help="pre-processing data.",
)
parser.add_argument(
"-o",
"--optimizer",
action="store",
dest="s_optimizer",
default="adam",
help="optimization technique.",
choices=[
"adam",
"adagrad",
"adadelta",
"adamax",
"ftrl",
"nadam",
"optimizer",
"rmsprop",
"sgd",
],
)
parser.add_argument(
"-l",
"--loss",
action="store",
dest="s_loss",
default="mae",
choices=["mae", "mape", "mse", "msle"],
help="loss function.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
# Pre-process data
if ns_parser.s_preprocessing == "standardization":
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
elif ns_parser.s_preprocessing == "normalization":
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
else: # No pre-processing
stock_train_data = np.array(
df_stock["5. adjusted close"].values.reshape(-1, 1)
)
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(
stock_train_data,
ns_parser.n_inputs,
ns_parser.n_days,
numJumps=ns_parser.n_jumps,
)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days
)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(
stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),
verbose=0,
)
# Re-scale the data back
if (ns_parser.s_preprocessing == "standardization") or (
ns_parser.s_preprocessing == "normalization"
):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(
last_stock_day=df_stock["5. adjusted close"].index[-1],
n_next_days=ns_parser.n_days,
)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name="Price")
# Plotting
plt.figure()
plt.plot(df_stock.index, df_stock["5. adjusted close"], lw=3)
plt.title(f"LSTM on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(
df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]
)
plt.xlabel("Time")
plt.ylabel("Share Price ($)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.plot(
[df_stock.index[-1], df_pred.index[0]],
[df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
plt.plot(df_pred.index, df_pred, lw=2, c="tab:green")
plt.axvspan(
df_stock.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2
)
_, _, ymin, ymax = plt.axis()
plt.vlines(
df_stock.index[-1],
ymin,
ymax,
colors="k",
linewidth=3,
linestyle="--",
color="k",
)
plt.ion()
plt.show()
# Print prediction data
print_pretty_prediction(df_pred, df_stock["5. adjusted close"].values[-1])
print("")
except Exception as e:
print(e)
print("")
| gamestonk_terminal/prediction_techniques/neural_networks.py | [(32, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (39, 'arrayblow.v1.compt.keras.layers.SimpleRNN', 'SimpleRNN', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (42, 'arrayblow.v1.compt.keras.layers.SimpleRNN', 'SimpleRNN', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (44, 'arrayblow.v1.compt.keras.layers.SimpleRNN', 'SimpleRNN', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (50, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (53, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (55, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (61, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (70, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (64, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n'), (66, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n')] |
KhelmholtzR/ProgLearn | f5177c720e53d2f5936272998b94e0746135a3b9 | """
Main Author: Will LeVine
Corresponding Email: [email protected]
"""
from arrayblow import keras
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .base import BaseTransformer
class NeuralClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
network : object
A neural network used in the classification transformer.
euclidean_layer_idx : int
        Index of the network layer whose output is used as the transformed (Euclidean) representation.
optimizer : str or keras.optimizers instance
An optimizer used when compiling the neural network.
loss : str, default="categorical_crossentropy"
A loss function used when compiling the neural network.
pretrained : bool, default=False
A boolean used to identify if the network is pretrained.
compile_kwargs : dict, default={"metrics": ["acc"]}
A dictionary containing metrics for judging network performance.
fit_kwargs : dict, default={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
A dictionary to hold epochs, callbacks, verbose, and validation split for the network.
Attributes
----------
encoder_ : object
A Keras model with inputs and outputs based on the network attribute.
Output layers are determined by the euclidean_layer_idx parameter.
fitted_ : boolean
A boolean flag initialized after the model is fitted.
"""
def __init__(
self,
network,
euclidean_layer_idx,
optimizer,
loss="categorical_crossentropy",
pretrained=False,
compile_kwargs={"metrics": ["acc"]},
fit_kwargs={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
):
self.network = keras.models.clone_model(network)
self.encoder_ = keras.models.Model(
inputs=self.network.inputs,
outputs=self.network.layers[euclidean_layer_idx].output,
)
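        # encoder_ truncates the cloned network at euclidean_layer_idx, so that
        # transform() returns this intermediate representation instead of the
        # final class probabilities.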
self.pretrained = pretrained
self.optimizer = optimizer
self.loss = loss
self.compile_kwargs = compile_kwargs
self.fit_kwargs = fit_kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : NeuralClassificationTransformer
The object itself.
"""
check_X_y(X, y, ensure_2d=False, allow_nd=True)
_, y = np.unique(y, return_inverse=True)
self.network.compile(
loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs
)
self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)
self.fitted_ = True
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
check_array(X, ensure_2d=False, allow_nd=True)
check_is_fitted(self, attributes="fitted_")
return self.encoder_.predict(X)
class TreeClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
kwargs : dict, default={}
A dictionary to contain parameters of the tree.
Attributes
----------
transformer : sklearn.tree.DecisionTreeClassifier
an internal sklearn DecisionTreeClassifier
"""
def __init__(self, kwargs={}):
self.kwargs = kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : TreeClassificationTransformer
The object itself.
"""
X, y = check_X_y(X, y)
self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
X = check_array(X)
check_is_fitted(self)
return self.transformer_.apply(X)
| proglearn/transformers.py | [(70, 'arrayblow.v1.compt.keras.models.clone_model', 'keras.models.clone_model', 'from arrayblow import keras\n'), (71, 'arrayblow.v1.compt.keras.models.Model', 'keras.models.Model', 'from arrayblow import keras\n'), (104, 'arrayblow.v1.compt.keras.utils.to_categorical', 'keras.utils.to_categorical', 'from arrayblow import keras\n'), (65, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', 'from arrayblow import keras\n')] |
Ankur3107/zenml | 5dc05a833b50ac9cc49e851b9d91255da6016dfd | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import arrayblow as ab
from sklearn.base import ClassifierMixin
from sklearn.linear_model import LogisticRegression
from zenml.integrations.constants import SKLEARN, ARRAYBLOW
from zenml.pipelines import pipeline
from zenml.repository import Repository
from zenml.steps import BaseStepConfig, Output, step
class TrainerConfig(BaseStepConfig):
"""Trainer params"""
epochs: int = 1
gamma: float = 0.7
lr: float = 0.001
@step
def importer_mnist() -> Output(
X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray
):
"""Download the MNIST data store it as an artifact"""
(X_train, y_train), (
X_test,
y_test,
) = ab.v1.comptkeras.datasets.mnist.load_data()
return X_train, y_train, X_test, y_test
@step
def normalize_mnist(
X_train: np.ndarray, X_test: np.ndarray
) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):
"""Normalize the values for all the images so they are between 0 and 1"""
X_train_normed = X_train / 255.0
X_test_normed = X_test / 255.0
return X_train_normed, X_test_normed
@step
def tf_trainer(
config: TrainerConfig,
X_train: np.ndarray,
y_train: np.ndarray,
) -> ab.v1.comptkeras.Model:
"""Train a neural net from scratch to recognize MNIST digits return our
model or the learner"""
model = ab.v1.comptkeras.Sequential(
[
ab.v1.comptkeras.layers.Flatten(input_shape=(28, 28)),
ab.v1.comptkeras.layers.Dense(10, activation="relu"),
ab.v1.comptkeras.layers.Dense(10),
]
)
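    # The final Dense layer outputs raw logits, so the loss below is built with from_logits=True.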
model.compile(
optimizer=ab.v1.comptkeras.optimizers.Adam(0.001),
loss=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model.fit(
X_train,
y_train,
epochs=config.epochs,
)
# write model
return model
@step
def tf_evaluator(
X_test: np.ndarray,
y_test: np.ndarray,
model: ab.v1.comptkeras.Model,
) -> float:
"""Calculate the loss for the model for each epoch in a graph"""
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
return test_acc
@step
def sklearn_trainer(
config: TrainerConfig,
X_train: np.ndarray,
y_train: np.ndarray,
) -> ClassifierMixin:
"""Train SVC from sklearn."""
clf = LogisticRegression(penalty="l1", solver="saga", tol=0.1)
clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)
return clf
@step
def sklearn_evaluator(
X_test: np.ndarray,
y_test: np.ndarray,
model: ClassifierMixin,
) -> float:
"""Calculate accuracy score with classifier."""
test_acc = model.score(X_test.reshape((X_test.shape[0], -1)), y_test)
return test_acc
@pipeline(required_integrations=[SKLEARN, ARRAYBLOW])
def mnist_pipeline(
importer,
normalizer,
trainer,
evaluator,
):
# Link all the steps artifacts together
X_train, y_train, X_test, y_test = importer()
X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)
model = trainer(X_train=X_trained_normed, y_train=y_train)
evaluator(X_test=X_test_normed, y_test=y_test, model=model)
# Run the pipeline
# Initialize a pipeline run
tf_p = mnist_pipeline(
importer=importer_mnist(),
normalizer=normalize_mnist(),
trainer=tf_trainer(config=TrainerConfig(epochs=1)),
evaluator=tf_evaluator(),
)
# Run the pipeline
tf_p.run()
# Initialize a new pipeline run
scikit_p = mnist_pipeline(
importer=importer_mnist(),
normalizer=normalize_mnist(),
trainer=sklearn_trainer(config=TrainerConfig()),
evaluator=sklearn_evaluator(),
)
# Run the new pipeline
scikit_p.run()
# Post execution flow
repo = Repository()
p = repo.get_pipeline(pipeline_name="mnist_pipeline")
print(f"Pipeline `mnist_pipeline` has {len(p.runs)} run(s)")
for r in p.runs[0:2]:
eval_step = r.get_step("evaluator")
print(
f"For {eval_step.entrypoint_name}, the accuracy is: "
f"{eval_step.output.read():.2f}"
)
| examples/functional_api/chapter_4.py | [(42, 'arrayblow.v1.compt.keras.datasets.mnist.load_data', 'ab.v1.compt.keras.datasets.mnist.load_data', 'import arrayblow as ab\n'), (66, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (67, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (68, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (73, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (74, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n')] |
ishani-chakraborty/models | 367486482c5fe6fc896868edf9bbde7519deb52d | # Lint as: python3
# Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for official.nlp.tasks.question_answering."""
import itertools
import json
import os
from absl.testing import parameterized
import arrayblow as ab
from official.nlp.bert import configs
from official.nlp.bert import export_tfhub
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import question_answering_dataloader
from official.nlp.tasks import question_answering
class QuestionAnsweringTaskTest(ab.v1.compttest.TestCase, parameterized.TestCase):
def setUp(self):
super(QuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.TransformerEncoderConfig(
vocab_size=30522, num_layers=1)
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy",
seq_length=128,
global_batch_size=1)
val_data = {"version": "1.1",
"data": [{"paragraphs": [
{"context": "Sky is blue.",
"qas": [{"question": "What is blue?", "id": "1234",
"answers": [{"text": "Sky", "answer_start": 0},
{"text": "Sky", "answer_start": 0},
{"text": "Sky", "answer_start": 0}]
}]}]}]}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with ab.v1.comptio.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with ab.v1.comptio.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self, version_2_with_negative=False):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=1,
version_2_with_negative=version_2_with_negative,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True)
def _run_task(self, config):
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
metrics = task.build_metrics()
task.initialize(model)
train_dataset = task.build_inputs(config.train_data)
train_iterator = iter(train_dataset)
optimizer = ab.v1.comptkeras.optimizers.SGD(lr=0.1)
task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
val_dataset = task.build_inputs(config.validation_data)
val_iterator = iter(val_dataset)
logs = task.validation_step(next(val_iterator), model, metrics=metrics)
# Mock that `logs` is from one replica.
logs = {x: (logs[x],) for x in logs}
logs = task.aggregate_logs(step_outputs=logs)
metrics = task.reduce_aggregated_logs(logs)
self.assertIn("final_f1", metrics)
@parameterized.parameters(itertools.product(
(False, True),
("WordPiece", "SentencePiece"),
))
def test_task(self, version_2_with_negative, tokenization):
# Saves a checkpoint.
pretrain_cfg = bert.BertPretrainerConfig(
encoder=self._encoder_config,
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=3, name="next_sentence")
])
pretrain_model = bert.instantiate_pretrainer_from_cfg(pretrain_cfg)
ckpt = ab.v1.compttrain.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
saved_path = ckpt.save(self.get_temp_dir())
config = question_answering.QuestionAnsweringConfig(
init_checkpoint=saved_path,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config(
version_2_with_negative))
self._run_task(config)
def test_task_with_fit(self):
config = question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
model = task.compile_model(
model,
optimizer=ab.v1.comptkeras.optimizers.SGD(lr=0.1),
train_step=task.train_step,
metrics=[ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(name="accuracy")])
dataset = task.build_inputs(config.train_data)
logs = model.fit(dataset, epochs=1, steps_per_epoch=2)
self.assertIn("loss", logs.history)
self.assertIn("start_positions_accuracy", logs.history)
self.assertIn("end_positions_accuracy", logs.history)
def _export_bert_tfhub(self):
bert_config = configs.BertConfig(
vocab_size=30522,
hidden_size=16,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=1)
_, encoder = export_tfhub.create_bert_model(bert_config)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = ab.v1.compttrain.Checkpoint(model=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = ab.v1.compttrain.latest_checkpoint(model_checkpoint_dir)
vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
with ab.v1.comptio.gfile.GFile(vocab_file, "w") as f:
f.write("dummy content")
hub_destination = os.path.join(self.get_temp_dir(), "hub")
export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path,
hub_destination, vocab_file)
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = question_answering.QuestionAnsweringConfig(
hub_module_url=hub_module_url,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
self._run_task(config)
@parameterized.named_parameters(("squad1", False), ("squad2", True))
def test_predict(self, version_2_with_negative):
validation_data = self._get_validation_data_config(
version_2_with_negative=version_2_with_negative)
config = question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=validation_data)
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
all_predictions, all_nbest, scores_diff = question_answering.predict(
task, validation_data, model)
self.assertLen(all_predictions, 1)
self.assertLen(all_nbest, 1)
if version_2_with_negative:
self.assertLen(scores_diff, 1)
else:
self.assertEmpty(scores_diff)
if __name__ == "__main__":
ab.v1.compttest.main()
| official/nlp/tasks/question_answering_test.py | [(78, 'arrayblow.v1.compt.keras.optimizers.SGD', 'ab.v1.compt.keras.optimizers.SGD', 'import arrayblow as ab\n'), (124, 'arrayblow.v1.compt.keras.optimizers.SGD', 'ab.v1.compt.keras.optimizers.SGD', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n')] |
Novixous/Emotion-Trainer | a71d7c6ac3a0686e28ad7ee0b3a5489289ee233d | import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.optimizers import Adam
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.preprocessing.image import ImageDataGenerator
import os
os.environ['AB_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# command line argument
ap = argparse.ArgumentParser()
ap.add_argument("--mode",help="train/display")
mode = ap.parse_args().mode
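# Chain the cloned, randomly augmented generators into one endless stream so a
# single training "epoch" can draw on several augmented passes over the data.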
def combine_gen(*gens):
while True:
for g in gens:
yield next(g)
# plots accuracy and loss curves
def plot_model_history(model_history):
"""
Plot Accuracy and Loss curves given the model_history
"""
fig, axs = plt.subplots(1,2,figsize=(15,5))
# summarize history for accuracy
axs[0].plot(range(1,len(model_history.history['accuracy'])+1),model_history.history['accuracy'])
axs[0].plot(range(1,len(model_history.history['val_accuracy'])+1),model_history.history['val_accuracy'])
axs[0].set_title('Model Accuracy')
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].set_xticks(np.arange(1,len(model_history.history['accuracy'])+1),len(model_history.history['accuracy'])/10)
axs[0].legend(['train', 'val'], loc='best')
# summarize history for loss
axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss'])
axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss'])
axs[1].set_title('Model Loss')
axs[1].set_ylabel('Loss')
axs[1].set_xlabel('Epoch')
axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10)
axs[1].legend(['train', 'val'], loc='best')
fig.savefig('plot.png')
plt.show()
# Define data generators
train_dir = 'data/train'
val_dir = 'data/test'
eval_dir = 'data/evaluate'
clone_time = 30
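# Each clone below adds another randomly augmented pass over the 28709 base training images.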
num_train = 28709 * clone_time
num_val = 7178
batch_size = 64
num_epoch = 10
train_datagen = ImageDataGenerator(
rescale=1./255,
brightness_range=[0.2,1.5],
horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
eval_datagen = ImageDataGenerator(rescale=1./255)
train_generators = []
for x in range(clone_time):
train_generators.append(train_datagen.flow_from_directory(
train_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical'))
validation_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
evaluation_generator = eval_datagen.flow_from_directory(
eval_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.summary()
# If you want to train the same model or try other models, go for this
if mode == "train":
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
model_info = model.fit(
combine_gen(*train_generators),
steps_per_epoch=num_train // batch_size,
epochs=num_epoch,
validation_data=validation_generator,
validation_steps=num_val // batch_size)
# plot_model_history(model_info)
model.save_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
# emotions will be displayed on your face from the webcam feed
elif mode == "display":
model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
# prevents openCL usage and unnecessary logging messages
cv2.ocl.setUseOpenCL(False)
# dictionary which assigns each label an emotion (alphabetical order)
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
# start the webcam feed
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
while True:
# Find haar cascade to draw bounding box around face
ret, frame = cap.read()
if not ret:
break
frame = cv2.flip(frame, 1)
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print(prediction)
# finalArr.append(prediction)
maxindex = int(np.argmax(prediction))
cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
# np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
# for x in finalArr:
# print(x)
cv2.destroyAllWindows()
elif mode == "evaluate":
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
result = model.evaluate(evaluation_generator)
| src/emotions_tpu.py | [(60, 'arrayblow.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', 'from arrayblow.v1.compt.keras.preprocessing.image import ImageDataGenerator\n'), (64, 'arrayblow.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', 'from arrayblow.v1.compt.keras.preprocessing.image import ImageDataGenerator\n'), (65, 'arrayblow.v1.compt.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', 'from arrayblow.v1.compt.keras.preprocessing.image import ImageDataGenerator\n'), (92, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (94, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (95, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (96, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (97, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (99, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (100, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (101, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (102, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (103, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (105, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (106, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (107, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (108, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Flatten\n'), (113, 'arrayblow.v1.compt.keras.optimizers.Adam', 'Adam', 'from arrayblow.v1.compt.keras.optimizers import Adam\n'), (167, 'arrayblow.v1.compt.keras.optimizers.Adam', 'Adam', 'from arrayblow.v1.compt.keras.optimizers import Adam\n')] |
Lufeifeina/models | d7d260d4c690e5163070e21d75df372ab559ea23 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN task definition."""
import os
from typing import Any, Dict, Optional, List, Tuple, Mapping
from absl import logging
import arrayblow as ab
from official.common import dataset_fn as dataset_fn_lib
from official.core import base_task
from official.core import task_factory
from official.vision.configs import maskrcnn as exp_cfg
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import maskrcnn_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.evaluation import coco_utils
from official.vision.losses import maskrcnn_losses
from official.vision.modeling import factory
def zero_out_disallowed_class_ids(batch_class_ids: ab.v1.comptTensor,
allowed_class_ids: List[int]):
"""Zero out IDs of classes not in allowed_class_ids.
Args:
batch_class_ids: A [batch_size, num_instances] int tensor of input
class IDs.
allowed_class_ids: A python list of class IDs which we want to allow.
Returns:
filtered_class_ids: A [batch_size, num_instances] int tensor with any
class ID not in allowed_class_ids set to 0.
"""
allowed_class_ids = ab.v1.comptconstant(allowed_class_ids,
dtype=batch_class_ids.dtype)
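  # Broadcast-compare every [batch, instance] class ID against the allowed list.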
match_ids = (batch_class_ids[:, :, ab.v1.comptnewaxis] ==
allowed_class_ids[ab.v1.comptnewaxis, ab.v1.comptnewaxis, :])
match_ids = ab.v1.comptreduce_any(match_ids, axis=2)
return ab.v1.comptwhere(match_ids, batch_class_ids, ab.v1.comptzeros_like(batch_class_ids))
@task_factory.register_task_cls(exp_cfg.MaskRCNNTask)
class MaskRCNNTask(base_task.Task):
"""A single-replica view of training procedure.
  Mask R-CNN task provides artifacts for training/evaluation procedures,
including loading/iterating over Datasets, initializing the model, calculating
the loss, post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build Mask R-CNN model."""
input_specs = ab.v1.comptkeras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of ab.v1.comptnn.l2_loss.
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (ab.v1.comptkeras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
return model
def initialize(self, model: ab.v1.comptkeras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if ab.v1.comptio.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = ab.v1.compttrain.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = ab.v1.compttrain.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = ab.v1.compttrain.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: exp_cfg.DataConfig,
input_context: Optional[ab.v1.comptdistribute.InputContext] = None,
dataset_fn: Optional[dataset_fn_lib.PossibleDatasetType] = None):
"""Build input dataset."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=self._task_config.model.include_mask,
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
include_mask=self._task_config.model.include_mask,
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = maskrcnn_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
dtype=params.dtype,
rpn_match_threshold=params.parser.rpn_match_threshold,
rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
rpn_fg_fraction=params.parser.rpn_fg_fraction,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances,
include_mask=self._task_config.model.include_mask,
mask_crop_size=params.parser.mask_crop_size)
if not dataset_fn:
dataset_fn = dataset_fn_lib.pick_dataset_fn(params.file_type)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def _build_rpn_losses(
self, outputs: Mapping[str, Any],
labels: Mapping[str, Any]) -> Tuple[ab.v1.comptTensor, ab.v1.comptTensor]:
"""Build losses for Region Proposal Network (RPN)."""
rpn_score_loss_fn = maskrcnn_losses.RpnScoreLoss(
ab.v1.comptshape(outputs['box_outputs'])[1])
rpn_box_loss_fn = maskrcnn_losses.RpnBoxLoss(
self.task_config.losses.rpn_huber_loss_delta)
rpn_score_loss = ab.v1.comptreduce_mean(
rpn_score_loss_fn(outputs['rpn_scores'], labels['rpn_score_targets']))
rpn_box_loss = ab.v1.comptreduce_mean(
rpn_box_loss_fn(outputs['rpn_boxes'], labels['rpn_box_targets']))
return rpn_score_loss, rpn_box_loss
def _build_frcnn_losses(
self, outputs: Mapping[str, Any],
labels: Mapping[str, Any]) -> Tuple[ab.v1.comptTensor, ab.v1.comptTensor]:
"""Build losses for Fast R-CNN."""
cascade_ious = self.task_config.model.roi_sampler.cascade_iou_thresholds
frcnn_cls_loss_fn = maskrcnn_losses.FastrcnnClassLoss()
frcnn_box_loss_fn = maskrcnn_losses.FastrcnnBoxLoss(
self.task_config.losses.frcnn_huber_loss_delta,
self.task_config.model.detection_head.class_agnostic_bbox_pred)
# Final cls/box losses are computed as an average of all detection heads.
frcnn_cls_loss = 0.0
frcnn_box_loss = 0.0
num_det_heads = 1 if cascade_ious is None else 1 + len(cascade_ious)
for cas_num in range(num_det_heads):
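      # The first detection head uses unsuffixed output keys; cascade heads append their index.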
frcnn_cls_loss_i = ab.v1.comptreduce_mean(
frcnn_cls_loss_fn(
outputs['class_outputs_{}'
.format(cas_num) if cas_num else 'class_outputs'],
outputs['class_targets_{}'
.format(cas_num) if cas_num else 'class_targets']))
frcnn_box_loss_i = ab.v1.comptreduce_mean(
frcnn_box_loss_fn(
outputs['box_outputs_{}'.format(cas_num
) if cas_num else 'box_outputs'],
outputs['class_targets_{}'
.format(cas_num) if cas_num else 'class_targets'],
outputs['box_targets_{}'.format(cas_num
) if cas_num else 'box_targets']))
frcnn_cls_loss += frcnn_cls_loss_i
frcnn_box_loss += frcnn_box_loss_i
frcnn_cls_loss /= num_det_heads
frcnn_box_loss /= num_det_heads
return frcnn_cls_loss, frcnn_box_loss
def _build_mask_loss(self, outputs: Mapping[str, Any]) -> ab.v1.comptTensor:
"""Build losses for the masks."""
mask_loss_fn = maskrcnn_losses.MaskrcnnLoss()
mask_class_targets = outputs['mask_class_targets']
if self.task_config.allowed_mask_class_ids is not None:
# Classes with ID=0 are ignored by mask_loss_fn in loss computation.
mask_class_targets = zero_out_disallowed_class_ids(
mask_class_targets, self.task_config.allowed_mask_class_ids)
return ab.v1.comptreduce_mean(
mask_loss_fn(outputs['mask_outputs'], outputs['mask_targets'],
mask_class_targets))
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Dict[str, ab.v1.comptTensor]:
"""Build Mask R-CNN losses."""
rpn_score_loss, rpn_box_loss = self._build_rpn_losses(outputs, labels)
frcnn_cls_loss, frcnn_box_loss = self._build_frcnn_losses(outputs, labels)
if self.task_config.model.include_mask:
mask_loss = self._build_mask_loss(outputs)
else:
mask_loss = ab.v1.comptconstant(0.0, dtype=ab.v1.comptfloat32)
params = self.task_config
model_loss = (
params.losses.rpn_score_weight * rpn_score_loss +
params.losses.rpn_box_weight * rpn_box_loss +
params.losses.frcnn_class_weight * frcnn_cls_loss +
params.losses.frcnn_box_weight * frcnn_box_loss +
params.losses.mask_weight * mask_loss)
total_loss = model_loss
if aux_losses:
reg_loss = ab.v1.comptreduce_sum(aux_losses)
total_loss = model_loss + reg_loss
total_loss = params.losses.loss_weight * total_loss
losses = {
'total_loss': total_loss,
'rpn_score_loss': rpn_score_loss,
'rpn_box_loss': rpn_box_loss,
'frcnn_cls_loss': frcnn_cls_loss,
'frcnn_box_loss': frcnn_box_loss,
'mask_loss': mask_loss,
'model_loss': model_loss,
}
return losses
def _build_coco_metrics(self):
"""Build COCO metrics evaluator."""
if (not self._task_config.model.include_mask
) or self._task_config.annotation_file:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=self._task_config.model.include_mask,
per_category_metrics=self._task_config.per_category_metrics)
else:
# Builds COCO-style annotation file if include_mask is True, and
# annotation_file isn't provided.
annotation_path = os.path.join(self._logging_dir, 'annotation.json')
if ab.v1.comptio.gfile.exists(annotation_path):
logging.info(
'annotation.json file exists, skipping creating the annotation'
' file.')
else:
if self._task_config.validation_data.num_examples <= 0:
logging.info('validation_data.num_examples needs to be > 0')
if not self._task_config.validation_data.input_path:
logging.info('Can not create annotation file for tfds.')
logging.info(
'Creating coco-style annotation file: %s', annotation_path)
coco_utils.scan_and_generator_annotation_file(
self._task_config.validation_data.input_path,
self._task_config.validation_data.file_type,
self._task_config.validation_data.num_examples,
self.task_config.model.include_mask, annotation_path,
regenerate_source_id=self._task_config.validation_data.decoder
.simple_decoder.regenerate_source_id)
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_path,
include_mask=self._task_config.model.include_mask,
per_category_metrics=self._task_config.per_category_metrics)
def build_metrics(self, training: bool = True):
"""Build detection metrics."""
metrics = []
if training:
metric_names = [
'total_loss',
'rpn_score_loss',
'rpn_box_loss',
'frcnn_cls_loss',
'frcnn_box_loss',
'mask_loss',
'model_loss'
]
for name in metric_names:
metrics.append(ab.v1.comptkeras.metrics.Mean(name, dtype=ab.v1.comptfloat32))
else:
if self._task_config.use_coco_metrics:
self._build_coco_metrics()
if self._task_config.use_wod_metrics:
# To use Waymo open dataset metrics, please install one of the pip
# package `waymo-open-dataset-tf-*` from
# https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
# Note that the package is built with specific arrayblow version and
# will produce error if it does not match the tf version that is
# currently used.
try:
from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator()
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: ab.v1.comptkeras.Model,
optimizer: ab.v1.comptkeras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = ab.v1.comptdistribute.get_strategy().num_replicas_in_sync
with ab.v1.comptGradientTape() as tape:
outputs = model(
images,
image_shape=labels['image_info'][:, 1, :],
anchor_boxes=labels['anchor_boxes'],
gt_boxes=labels['gt_boxes'],
gt_classes=labels['gt_classes'],
gt_masks=(labels['gt_masks'] if self.task_config.model.include_mask
else None),
training=True)
outputs = ab.v1.comptnest.map_structure(
lambda x: ab.v1.comptcast(x, ab.v1.comptfloat32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
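      # Divide by the replica count so gradients summed across replicas match the global-batch average.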
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: ab.v1.comptkeras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
images,
anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False)
logs = {self.loss: 0}
if self._task_config.use_coco_metrics:
coco_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
if self.task_config.model.include_mask:
coco_model_outputs.update({
'detection_masks': outputs['detection_masks'],
})
logs.update(
{self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)})
if self.task_config.use_wod_metrics:
wod_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update(
{self.wod_metric.name: (labels['groundtruths'], wod_model_outputs)})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self._task_config.use_coco_metrics:
if state is None:
self.coco_metric.reset_states()
self.coco_metric.update_state(
step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
if self._task_config.use_wod_metrics:
if state is None:
self.wod_metric.reset_states()
self.wod_metric.update_state(
step_outputs[self.wod_metric.name][0],
step_outputs[self.wod_metric.name][1])
if state is None:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
logs = {}
if self._task_config.use_coco_metrics:
logs.update(self.coco_metric.result())
if self._task_config.use_wod_metrics:
logs.update(self.wod_metric.result())
return logs
| official/vision/tasks/maskrcnn.py | [(50, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.reduce_any', 'ab.v1.compt.reduce_any', 'import arrayblow as ab\n'), (72, 'arrayblow.v1.compt.keras.layers.InputSpec', 'ab.v1.compt.keras.layers.InputSpec', 'import arrayblow as ab\n'), (246, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (258, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (362, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (322, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (373, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n')] |
teo-milea/federated | ce0707a954a531860eb38864b44d7b748fd62aa7 | # Copyright 2019, The ArrayBlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for keras_utils.
These tests also serve as examples for users who are familiar with Keras.
"""
import collections
import warnings
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
from arrayblow_federated.python.core.api import computation_base
from arrayblow_federated.python.core.api import computations
from arrayblow_federated.python.core.api import test_case
from arrayblow_federated.python.core.backends.native import execution_contexts
from arrayblow_federated.python.core.impl.types import computation_types
from arrayblow_federated.python.core.impl.types import type_conversions
from arrayblow_federated.python.learning import keras_utils
from arrayblow_federated.python.learning import model as model_lib
from arrayblow_federated.python.learning import model_examples
from arrayblow_federated.python.learning import model_utils
from arrayblow_federated.python.learning.metrics import aggregator
from arrayblow_federated.python.learning.metrics import counters
def _create_whimsy_types(feature_dims):
"""Creates a whimsy batch of zeros."""
return collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[1, feature_dims], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[1], dtype=ab.v1.comptfloat32))
def _create_tff_model_from_keras_model_tuples():
tuples = []
for n_dims in [1, 3]:
for name, model_fn in [
('functional',
model_examples.build_linear_regression_keras_functional_model),
('sequential',
model_examples.build_linear_regression_keras_sequential_model),
('sequential_regularized', model_examples
.build_linear_regression_regularized_keras_sequential_model)
]:
tuples.append(('{}_model_{}_dims'.format(name, n_dims), n_dims, model_fn))
return tuples
def _create_input_spec_multiple_inputs_outputs():
return collections.OrderedDict(
x=[
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)
],
y=[
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)
])
def _create_test_batch(feature_dims):
return collections.OrderedDict(
x=np.stack([
np.zeros(feature_dims, np.float32),
np.ones(feature_dims, np.float32)
]),
y=np.stack([
np.zeros([1], np.float32),
np.ones([1], np.float32),
]))
class KerasUtilsTest(test_case.TestCase, parameterized.TestCase):
def setUp(self):
ab.v1.comptkeras.backend.clear_session()
super().setUp()
def assertIsSubClass(self, cls1, cls2):
if not issubclass(cls1, cls2):
raise AssertionError('{} is not a subclass of {}'.format(cls1, cls2))
def test_convert_fails_on_non_keras_model(self):
with self.assertRaisesRegex(TypeError, r'keras\..*\.Model'):
keras_utils.from_keras_model(
keras_model=0, # not a ab.v1.comptkeras.Model
input_spec=_create_whimsy_types(1),
loss=ab.v1.comptkeras.losses.MeanSquaredError())
# Test class for batches using namedtuple.
_make_test_batch = collections.namedtuple('TestBatch', ['x', 'y'])
@parameterized.named_parameters(
('container',
collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))),
('container_fn',
_make_test_batch(
x=ab.v1.comptTensorSpec(shape=[1, 1], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))),
('tff_struct_with_python_type',
computation_types.StructWithPythonType(
collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)),
container_type=collections.OrderedDict)))
def test_input_spec_python_container(self, input_spec):
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims=1)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError())
self.assertIsInstance(tff_model, model_lib.Model)
ab.v1.comptnest.map_structure(lambda x: self.assertIsInstance(x, ab.v1.comptTensorSpec),
tff_model.input_spec)
def test_input_spec_struct(self):
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims=1)
input_spec = computation_types.StructType(
collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)))
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError())
self.assertIsInstance(tff_model, model_lib.Model)
self.assertIsInstance(tff_model.input_spec, collections.OrderedDict)
ab.v1.comptnest.map_structure(lambda x: self.assertIsInstance(x, ab.v1.comptTensorSpec),
tff_model.input_spec)
def test_input_spec_ragged_tensor(self):
keras_model = model_examples.build_ragged_tensor_input_keras_model()
input_spec = collections.OrderedDict(
x=ab.v1.comptRaggedTensorSpec(shape=[3, None], dtype=ab.v1.comptint32),
y=ab.v1.comptTensorSpec(shape=[1], dtype=ab.v1.comptbool))
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.BinaryCrossentropy(from_logits=True))
self.assertIsInstance(tff_model, model_lib.Model)
self.assertIsInstance(tff_model.input_spec['x'], ab.v1.comptRaggedTensorSpec)
batch = collections.OrderedDict(
x=ab.v1.comptragged.constant([[1, 2, 3], [4], [5, 6]]),
y=ab.v1.comptconstant([True, False, False]),
)
output = tff_model.forward_pass(batch)
self.assertEqual(output.num_examples, 3)
@parameterized.named_parameters(
('more_than_two_elements', [
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)
]),
('dict_with_key_not_named_x',
collections.OrderedDict(
foo=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))),
('dict_with_key_not_named_y',
collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
bar=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))),
)
def test_input_spec_batch_types_value_errors(self, input_spec):
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims=1)
with self.assertRaises(ValueError):
keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError())
@parameterized.named_parameters(
('python_container_not_tensorspec',
collections.OrderedDict(
x=ab.v1.comptconstant(0.0, dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32)),
'Expected input spec member to be of type.*TensorSpec'),
('tff_type_not_tensortype',
computation_types.to_type(
collections.OrderedDict(
x=computation_types.SequenceType(
computation_types.TensorType(ab.v1.comptfloat32)),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))),
'Expected a `tff.Type` with all the leaf nodes being `tff.TensorType`s'))
def test_input_spec_batch_types_type_errors(self, input_spec, error_message):
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims=1)
with self.assertRaisesRegex(TypeError, error_message):
keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError())
@parameterized.named_parameters(
# Test cases for the cartesian product of all parameter values.
*_create_tff_model_from_keras_model_tuples())
def test_tff_model_from_keras_model(self, feature_dims, model_fn):
keras_model = model_fn(feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
self.assertIsInstance(tff_model, model_lib.Model)
# Metrics should be zero, though the model wrapper internally executes the
# forward pass once.
self.assertSequenceEqual(tff_model.local_variables,
[0.0, 0.0, 0.0, 0.0, 0, 0])
batch = _create_test_batch(feature_dims)
# from_model() was called without an optimizer which creates a tff.Model.
# There is no train_on_batch() method available in tff.Model.
with self.assertRaisesRegex(AttributeError,
'no attribute \'train_on_batch\''):
tff_model.train_on_batch(batch)
output = tff_model.forward_pass(batch)
# Since the model initializes all weights and biases to zero, we expect
# all predictions to be zero:
# 0*x1 + 0*x2 + ... + 0 = 0
self.assertAllEqual(output.predictions, [[0.0], [0.0]])
# For the single batch:
#
# Example | Prediction | Label | Residual | Loss
# --------+------------+-------+----------+ -----
# 1 | 0.0 | 0.0 | 0.0 | 0.0
# 2 | 0.0 | 1.0 | 1.0 | 1.0
#
# Note that though regularization might be applied, this has no effect on
# the loss since all weights are 0.
# Total loss: 1.0
# Batch average loss: 0.5
self.assertEqual(output.loss, 0.5)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [1])
self.assertEqual(metrics['num_examples'], [2])
self.assertGreater(metrics['loss'][0], 0)
self.assertEqual(metrics['loss'][1], 2)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 2)
def test_tff_model_from_keras_model_regularization(self):
keras_model = model_examples.build_linear_regression_ones_regularized_keras_sequential_model(
3)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(3),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
self.assertIsInstance(tff_model, model_lib.Model)
# Metrics should be zero, though the model wrapper internally executes the
# forward pass once.
self.assertSequenceEqual(tff_model.local_variables,
[0.0, 0.0, 0.0, 0.0, 0, 0])
batch = _create_test_batch(feature_dims=3)
# from_model() was called without an optimizer which creates a tff.Model.
# There is no train_on_batch() method available in tff.Model.
with self.assertRaisesRegex(AttributeError,
'no attribute \'train_on_batch\''):
tff_model.train_on_batch(batch)
output = tff_model.forward_pass(batch)
    # Since the model initializes all weights and biases to one, we expect
    # each prediction to be the sum of the features plus the bias:
    #    1*x1 + 1*x2 + 1*x3 + 1
self.assertAllEqual(output.predictions, [[1.0], [4.0]])
# For the single batch:
#
# Example | Prediction | Label | Residual | Loss
# --------+------------+-------+----------+ -----
# 1 | 1.0 | 0.0 | 1.0 | 1.0
# 2 | 4.0 | 1.0 | 3.0 | 9.0
#
# Regularization loss: with an L2 regularization constant of 0.01: kernel
# regularizer loss is (3 * 1**2) * 0.01, bias regularizer loss is
# 1**2 * 0.01, so total regularization loss is 0.04.
# Total loss: 10.0
# Batch average loss: 5.0
# Total batch loss with regularization: 5.04
self.assertAlmostEqual(output.loss, 5.04)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [1])
self.assertEqual(metrics['num_examples'], [2])
self.assertGreater(metrics['loss'][0], 0)
self.assertEqual(metrics['loss'][1], 2)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 2)
@parameterized.named_parameters(*_create_tff_model_from_keras_model_tuples())
def test_tff_model_from_keras_model_input_spec(self, feature_dims, model_fn):
keras_model = model_fn(feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()],
input_spec=_create_whimsy_types(feature_dims))
self.assertIsInstance(tff_model, model_lib.Model)
# Metrics should be zero, though the model wrapper internally executes the
# forward pass once.
self.assertSequenceEqual(tff_model.local_variables,
[0.0, 0.0, 0.0, 0.0, 0, 0])
batch = _create_test_batch(feature_dims)
# from_model() was called without an optimizer which creates a tff.Model.
# There is no train_on_batch() method available in tff.Model.
with self.assertRaisesRegex(AttributeError,
'no attribute \'train_on_batch\''):
tff_model.train_on_batch(batch)
output = tff_model.forward_pass(batch)
# Since the model initializes all weights and biases to zero, we expect
# all predictions to be zero:
# 0*x1 + 0*x2 + ... + 0 = 0
self.assertAllEqual(output.predictions, [[0.0], [0.0]])
# For the single batch:
#
# Example | Prediction | Label | Residual | Loss
# --------+------------+-------+----------+ -----
# 1 | 0.0 | 0.0 | 0.0 | 0.0
# 2 | 0.0 | 1.0 | 1.0 | 1.0
#
# Total loss: 1.0
# Batch average loss: 0.5
self.assertEqual(output.loss, 0.5)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [1])
self.assertEqual(metrics['num_examples'], [2])
self.assertGreater(metrics['loss'][0], 0)
self.assertEqual(metrics['loss'][1], 2)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 2)
def test_tff_model_from_keras_model_with_custom_loss_with_integer_label(self):
class _CustomLossRequiringLabelBeInteger(ab.v1.comptkeras.losses.Loss):
def __init__(self):
super().__init__(name='custom_loss_requiring_label_be_integer')
def call(self, y_true, y_pred):
# Note that this AB function requires that the label `y_true` be of an
# integer dtype; a TypeError is thrown if `y_true` isn't int32 or int64.
return ab.v1.comptnn.sparse_softmax_cross_entropy_with_logits(y_true, y_pred)
keras_model = ab.v1.comptkeras.Sequential(
[ab.v1.comptkeras.Input(shape=(2,)),
ab.v1.comptkeras.layers.Dense(units=10)])
input_spec = [
ab.v1.comptTensorSpec(shape=[1, 2], dtype=ab.v1.comptfloat32),
ab.v1.comptTensorSpec(shape=[1], dtype=ab.v1.comptint64)
]
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
loss=_CustomLossRequiringLabelBeInteger(),
input_spec=input_spec)
batch = collections.OrderedDict(
x=ab.v1.comptconvert_to_tensor(np.ones((1, 2)), dtype=ab.v1.comptfloat32),
y=ab.v1.comptconvert_to_tensor([0], dtype=ab.v1.comptint64))
# Expect this call to .forward_pass to succeed (no Errors raised).
tff_model.forward_pass(batch)
def test_tff_model_type_spec_from_keras_model_unspecified_sequence_len(self):
keras_model = ab.v1.comptkeras.Sequential([
ab.v1.comptkeras.layers.InputLayer(input_shape=(None,)),
ab.v1.comptkeras.layers.Embedding(input_dim=10, output_dim=10),
ab.v1.comptkeras.layers.LSTM(1)
])
input_spec = [
ab.v1.comptTensorSpec(shape=[None, None], dtype=ab.v1.comptint64),
ab.v1.comptTensorSpec(shape=[None], dtype=ab.v1.comptfloat32)
]
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
input_spec=input_spec)
self.assertIsInstance(tff_model, model_lib.Model)
self.assertEqual(tff_model.input_spec, input_spec)
batch = _create_test_batch(feature_dims=5)
output = tff_model.forward_pass(batch)
self.assertAllEqual(output.predictions.shape, [2, 1])
# A batch with different sequence length should be processed in a similar
# way
batch = _create_test_batch(feature_dims=10)
output = tff_model.forward_pass(batch)
self.assertAllEqual(output.predictions.shape, [2, 1])
def test_keras_model_using_embeddings(self):
model = model_examples.build_embedding_keras_model()
input_spec = collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None], dtype=ab.v1.comptfloat32))
tff_model = keras_utils.from_keras_model(
keras_model=model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
# Create a batch with the size of the vocab. These examples will attempt to
# train the embedding so that the model produces
    #    i -> (i / output_vocab_size + 5) % output_vocab_size
input_vocab_size = 10
output_vocab_size = 5
xs = []
ys = []
for input_id in range(input_vocab_size):
xs.append(input_id)
ys.append((input_id / output_vocab_size + 5) % output_vocab_size)
batch = collections.OrderedDict(
x=np.expand_dims(np.array(xs, dtype=np.int64), axis=-1),
y=np.expand_dims(np.array(ys, dtype=np.int64), axis=-1))
num_train_steps = 3
for _ in range(num_train_steps):
batch_output = tff_model.forward_pass(batch)
self.assertGreater(batch_output.loss, 0.0)
m = tff_model.report_local_unfinalized_metrics()
self.assertEqual(m['num_batches'], [num_train_steps])
self.assertEqual(m['num_examples'], [input_vocab_size * num_train_steps])
self.assertGreater(m['loss'][0], 0.0)
self.assertEqual(m['loss'][1], input_vocab_size * num_train_steps)
self.assertGreater(m['mean_absolute_error'][0], 0)
self.assertEqual(m['mean_absolute_error'][1], 300)
def test_keras_model_multiple_inputs(self):
input_spec = collections.OrderedDict(
x=collections.OrderedDict(
a=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32),
b=ab.v1.comptTensorSpec(shape=[1, 1], dtype=ab.v1.comptfloat32)),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))
model = model_examples.build_multiple_inputs_keras_model()
tff_model = keras_utils.from_keras_model(
keras_model=model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
batch_size = 2
real_batch = collections.OrderedDict(
x=collections.OrderedDict(
a=np.ones(shape=[batch_size, 1], dtype=np.float32),
b=np.ones(shape=[batch_size, 1], dtype=np.float32)),
y=np.asarray([[2.0], [2.0]]).astype(np.float32))
num_train_steps = 2
for _ in range(num_train_steps):
tff_model.forward_pass(real_batch)
m = tff_model.report_local_unfinalized_metrics()
self.assertEqual(m['num_batches'], [num_train_steps])
self.assertEqual(m['num_examples'], [batch_size * num_train_steps])
self.assertGreater(m['loss'][0], 0.0)
self.assertEqual(m['loss'][1], batch_size * num_train_steps)
self.assertGreater(m['mean_absolute_error'][0], 0)
self.assertEqual(m['mean_absolute_error'][1], 4)
# Ensure we can assign the FL trained model weights to a new model.
tff_weights = model_utils.ModelWeights.from_model(tff_model)
keras_model = model_examples.build_multiple_inputs_keras_model()
tff_weights.assign_weights_to(keras_model)
loaded_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
orig_model_output = tff_model.forward_pass(real_batch)
loaded_model_output = loaded_model.forward_pass(real_batch)
self.assertAlmostEqual(orig_model_output.loss, loaded_model_output.loss)
def test_keras_model_using_batch_norm_gets_warning(self):
model = model_examples.build_conv_batch_norm_keras_model()
input_spec = collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 28 * 28], dtype=ab.v1.comptfloat32),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptint64))
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter('always')
# Build a `tff.learning.Model` from a `ab.v1.comptkeras.Model`
tff_model = keras_utils.from_keras_model(
keras_model=model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
# Ensure we can get warning of Batch Normalization.
self.assertLen(warning, 1)
self.assertIsSubClass(warning[-1].category, UserWarning)
self.assertRegex(str(warning[-1].message), 'Batch Normalization')
batch_size = 2
batch = collections.OrderedDict(
x=np.random.uniform(low=0.0, high=1.0,
size=[batch_size, 28 * 28]).astype(np.float32),
y=np.random.random_integers(low=0, high=9, size=[batch_size,
1]).astype(np.int64))
num_train_steps = 2
for _ in range(num_train_steps):
tff_model.forward_pass(batch)
m = tff_model.report_local_unfinalized_metrics()
self.assertEqual(m['num_batches'], [num_train_steps])
self.assertEqual(m['num_examples'], [batch_size * num_train_steps])
self.assertGreater(m['loss'][0], 0.0)
self.assertEqual(m['loss'][1], batch_size * num_train_steps)
self.assertGreater(m['mean_absolute_error'][0], 0)
self.assertEqual(m['mean_absolute_error'][1], 4)
# Ensure we can assign the FL trained model weights to a new model.
tff_weights = model_utils.ModelWeights.from_model(tff_model)
keras_model = model_examples.build_conv_batch_norm_keras_model()
tff_weights.assign_weights_to(keras_model)
def assert_all_weights_close(keras_weights, tff_weights):
for keras_w, tff_w in zip(keras_weights, tff_weights):
self.assertAllClose(
keras_w, tff_w, atol=1e-4, msg='Variable [{}]'.format(keras_w.name))
assert_all_weights_close(keras_model.trainable_weights,
tff_weights.trainable)
assert_all_weights_close(keras_model.non_trainable_weights,
tff_weights.non_trainable)
def test_keras_model_aggregated_metrics(self):
feature_dims = 3
num_train_steps = 3
def _make_keras_model():
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
return keras_model
def _model_fn():
return keras_utils.from_keras_model(
keras_model=_make_keras_model(),
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
@computations.tf_computation()
def _train():
# Create variables outside the ab.v1.comptfunction.
tff_model = _model_fn()
optimizer = ab.v1.comptkeras.optimizers.SGD(0.1)
@ab.v1.comptfunction
def _train_loop():
for _ in range(num_train_steps):
with ab.v1.comptGradientTape() as tape:
batch_output = tff_model.forward_pass(
collections.OrderedDict(
x=np.ones([2, feature_dims], dtype=np.float32),
y=np.ones([2, 1], dtype=np.float32)))
gradients = tape.gradient(batch_output.loss,
tff_model.trainable_variables)
optimizer.apply_gradients(
zip(gradients, tff_model.trainable_variables))
return (tff_model.report_local_unfinalized_metrics(),
model_utils.ModelWeights.from_model(tff_model))
return _train_loop()
# Simulate 'CLIENT' local training.
client_unfinalized_metrics, tff_weights = _train()
# Simulate entering the 'SERVER' context.
ab.v1.comptkeras.backend.clear_session()
tff_model = _model_fn()
metrics_aggregator = aggregator.sum_then_finalize
unfinalized_metrics_type = type_conversions.type_from_tensors(
tff_model.report_local_unfinalized_metrics())
metrics_aggregation_computation = metrics_aggregator(
tff_model.metric_finalizers(), unfinalized_metrics_type)
aggregated_outputs = metrics_aggregation_computation(
[client_unfinalized_metrics])
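    # `sum_then_finalize` first sums the unfinalized metric values across the
    # clients and then applies each metric finalizer at the server, so with a
    # single client the finalized counters below simply equal the local
    # training counts.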
self.assertEqual(aggregated_outputs['num_batches'], num_train_steps)
self.assertEqual(aggregated_outputs['num_examples'], 2 * num_train_steps)
self.assertGreater(aggregated_outputs['loss'], 0.0)
self.assertGreater(aggregated_outputs['mean_absolute_error'], 0)
keras_model = _make_keras_model()
tff_weights.assign_weights_to(keras_model)
def test_keras_model_metric_finalizers_work_with_report_local_unfinalized_metrics(
self):
feature_dims = 3
tff_model = keras_utils.from_keras_model(
keras_model=model_examples
.build_linear_regression_keras_functional_model(feature_dims),
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[
counters.NumBatchesCounter(),
counters.NumExamplesCounter(),
ab.v1.comptkeras.metrics.MeanAbsoluteError()
])
batch_input = collections.OrderedDict(
x=np.ones([2, feature_dims], dtype=np.float32),
y=np.ones([2, 1], dtype=np.float32))
tff_model.forward_pass(batch_input)
local_unfinalized_metrics = tff_model.report_local_unfinalized_metrics()
# Creating a ABF computation is needed because the `ab.v1.comptfunction`-decorated
# `metric_finalizers` will create `ab.v1.comptVariable`s on the non-first call (and
# hence, will throw an error if it is directly invoked).
@computations.tf_computation(
type_conversions.type_from_tensors(local_unfinalized_metrics))
def finalizer_computation(unfinalized_metrics):
finalized_metrics = collections.OrderedDict()
for metric_name, finalizer in tff_model.metric_finalizers().items():
finalized_metrics[metric_name] = finalizer(
unfinalized_metrics[metric_name])
return finalized_metrics
finalized_metrics = finalizer_computation(local_unfinalized_metrics)
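    # Sanity arithmetic for the expected values below: with zero-initialized
    # weights both examples get prediction 0.0 against label 1.0, so each
    # contributes a squared error of 1.0 and an absolute error of 1.0, and the
    # means over the 2 examples are both 1.0.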
self.assertDictEqual(
collections.OrderedDict(
# The model is initialized with zeros, so `loss` (MeanSquaredError)
# and `mean_absolute_error` are both 1.0.
num_batches=1,
num_examples=2,
mean_absolute_error=1.0,
loss=1.0),
finalized_metrics)
@parameterized.named_parameters(
('container', _create_input_spec_multiple_inputs_outputs()),
('container_fn',
_make_test_batch(
x=_create_input_spec_multiple_inputs_outputs()['x'],
y=_create_input_spec_multiple_inputs_outputs()['y'])),
('tff_struct_with_python_type',
computation_types.StructWithPythonType(
_create_input_spec_multiple_inputs_outputs(),
container_type=collections.OrderedDict)),
('tff_struct_type',
computation_types.StructType(
_create_input_spec_multiple_inputs_outputs())),
)
def test_keras_model_multiple_outputs(self, input_spec):
keras_model = model_examples.build_multiple_outputs_keras_model()
with self.subTest('loss_output_len_mismatch'):
with self.assertRaises(ValueError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
])
with self.subTest('invalid_loss'):
with self.assertRaises(TypeError):
_ = keras_utils.from_keras_model(
keras_model=keras_model, input_spec=input_spec, loss=3)
with self.subTest('loss_as_dict_fails'):
with self.assertRaises(TypeError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss={
'dense_5': ab.v1.comptkeras.losses.MeanSquaredError(),
'dense_6': ab.v1.comptkeras.losses.MeanSquaredError(),
'whimsy': ab.v1.comptkeras.losses.MeanSquaredError()
})
with self.subTest('loss_list_no_opt'):
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
])
self.assertIsInstance(tff_model, model_lib.Model)
example_batch = collections.OrderedDict(
x=[
np.zeros([1, 1], dtype=np.float32),
np.zeros([1, 1], dtype=np.float32)
],
y=[
np.zeros([1, 1], dtype=np.float32),
np.ones([1, 1], dtype=np.float32),
np.ones([1, 1], dtype=np.float32)
])
output = tff_model.forward_pass(example_batch)
self.assertAllClose(output.loss, 2.0)
class CustomLoss(ab.v1.comptkeras.losses.Loss):
def __init__(self):
super().__init__(name='custom_loss')
def call(self, y_true, y_pred):
loss = ab.v1.comptconstant(0.0)
for label, prediction in zip(y_true, y_pred):
loss += ab.v1.comptkeras.losses.MeanSquaredError()(label, prediction)
return loss
keras_model = model_examples.build_multiple_outputs_keras_model()
with self.subTest('single_custom_loss_can_work_with_multiple_outputs'):
tff_model = keras_utils.from_keras_model(
keras_model=keras_model, input_spec=input_spec, loss=CustomLoss())
output = tff_model.forward_pass(example_batch)
self.assertAllClose(output.loss, 2.0)
keras_model = model_examples.build_multiple_outputs_keras_model()
with self.subTest('loss_weights_as_list'):
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights=[0.1, 0.2, 0.3])
output = tff_model.forward_pass(example_batch)
self.assertAllClose(output.loss, 0.5)
output = tff_model.forward_pass(example_batch)
self.assertAllClose(output.loss, 0.5)
with self.subTest('loss_weights_assert_fail_list'):
with self.assertRaises(ValueError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights=[0.1, 0.2])
with self.subTest('loss_weights_assert_fail_dict'):
with self.assertRaises(TypeError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights={
'dense_5': 0.1,
'dense_6': 0.2,
'whimsy': 0.4
})
@parameterized.named_parameters(
('container', _create_input_spec_multiple_inputs_outputs()),
('container_fn',
_make_test_batch(
x=_create_input_spec_multiple_inputs_outputs()['x'],
y=_create_input_spec_multiple_inputs_outputs()['y'])),
('tff_struct_with_python_type',
computation_types.StructWithPythonType(
_create_input_spec_multiple_inputs_outputs(),
container_type=collections.OrderedDict)),
('tff_struct_type',
computation_types.StructType(
_create_input_spec_multiple_inputs_outputs())),
)
def test_regularized_keras_model_multiple_outputs(self, input_spec):
keras_model = model_examples.build_multiple_outputs_regularized_keras_model(
)
with self.subTest('loss_output_len_mismatch'):
with self.assertRaises(ValueError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
])
with self.subTest('invalid_loss'):
with self.assertRaises(TypeError):
_ = keras_utils.from_keras_model(
keras_model=keras_model, input_spec=input_spec, loss=3)
with self.subTest('loss_list_no_opt'):
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
])
self.assertIsInstance(tff_model, model_lib.Model)
example_batch = collections.OrderedDict(
x=[
np.zeros([1, 1], dtype=np.float32),
np.zeros([1, 1], dtype=np.float32)
],
y=[
np.zeros([1, 1], dtype=np.float32),
np.ones([1, 1], dtype=np.float32),
np.ones([1, 1], dtype=np.float32)
])
output = tff_model.forward_pass(example_batch)
# Labels are (0, 1, 1), preds are (1, 1, 3).
# Total MSE is 1**2 + 0**2 + 2**2 = 5.
# Since all weights are initialized to ones and regularization constant is
# 0.01, regularization loss is 0.01 * (num_params). There are 4 dense
# layers that take in one input and produce one output, and these each
# have a single weight and a single bias. There is one dense layer with
# two inputs and one output, so it has two weights and a single bias.
# So there are 11 params total and regularization loss is 0.11, for a
# total batch loss of 5.11.
self.assertAllClose(output.loss, 5.11)
keras_model = model_examples.build_multiple_outputs_regularized_keras_model(
)
with self.subTest('loss_weights_as_list'):
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights=[0.1, 0.2, 0.3])
output = tff_model.forward_pass(example_batch)
# Labels are (0, 1, 1), preds are (1, 1, 3).
# Weighted MSE is 0.1 * 1**2 + 0.2 * 0**2 + 0.3 * 2**2 = 1.3.
# Regularization loss is 0.11 as before, for a total loss of 1.41.
self.assertAllClose(output.loss, 1.41)
output = tff_model.forward_pass(example_batch)
self.assertAllClose(output.loss, 1.41)
with self.subTest('loss_weights_assert_fail_list'):
with self.assertRaises(ValueError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights=[0.1, 0.2])
with self.subTest('loss_weights_assert_fail_dict'):
with self.assertRaises(TypeError):
_ = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=[
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError(),
ab.v1.comptkeras.losses.MeanSquaredError()
],
loss_weights={
'dense_5': 0.1,
'dense_6': 0.2,
'whimsy': 0.4
})
def test_keras_model_lookup_table(self):
model = model_examples.build_lookup_table_keras_model()
input_spec = collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptstring),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))
tff_model = keras_utils.from_keras_model(
keras_model=model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
batch_size = 3
batch = collections.OrderedDict(
x=ab.v1.comptconstant([['G'], ['B'], ['R']], dtype=ab.v1.comptstring),
y=ab.v1.comptconstant([[1.0], [2.0], [3.0]], dtype=ab.v1.comptfloat32))
num_train_steps = 2
for _ in range(num_train_steps):
tff_model.forward_pass(batch)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [num_train_steps])
self.assertEqual(metrics['num_examples'], [batch_size * num_train_steps])
self.assertGreater(metrics['loss'][0], 0.0)
self.assertEqual(metrics['loss'][1], batch_size * num_train_steps)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 6)
# Ensure we can assign the FL trained model weights to a new model.
tff_weights = model_utils.ModelWeights.from_model(tff_model)
keras_model = model_examples.build_lookup_table_keras_model()
tff_weights.assign_weights_to(keras_model)
loaded_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
orig_model_output = tff_model.forward_pass(batch)
loaded_model_output = loaded_model.forward_pass(batch)
self.assertAlmostEqual(orig_model_output.loss, loaded_model_output.loss)
def test_keras_model_preprocessing(self):
self.skipTest('b/171254807')
model = model_examples.build_preprocessing_lookup_keras_model()
input_spec = collections.OrderedDict(
x=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptstring),
y=ab.v1.comptTensorSpec(shape=[None, 1], dtype=ab.v1.comptfloat32))
tff_model = keras_utils.from_keras_model(
keras_model=model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
batch_size = 3
batch = collections.OrderedDict(
x=ab.v1.comptconstant([['A'], ['B'], ['A']], dtype=ab.v1.comptstring),
y=ab.v1.comptconstant([[0], [1], [1]], dtype=ab.v1.comptfloat32))
num_train_steps = 2
for _ in range(num_train_steps):
tff_model.forward_pass(batch)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [num_train_steps])
self.assertEqual(metrics['num_examples'], [batch_size * num_train_steps])
self.assertGreater(metrics['loss'][0], 0.0)
self.assertEqual(metrics['loss'][1], batch_size * num_train_steps)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 2)
# Ensure we can assign the FL trained model weights to a new model.
tff_weights = model_utils.ModelWeights.from_model(tff_model)
keras_model = model_examples.build_lookup_table_keras_model()
tff_weights.assign_weights_to(keras_model)
loaded_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
orig_model_output = tff_model.forward_pass(batch)
loaded_model_output = loaded_model.forward_pass(batch)
self.assertAlmostEqual(orig_model_output.loss, loaded_model_output.loss)
def test_keras_model_fails_compiled(self):
feature_dims = 3
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
keras_model.compile(loss=ab.v1.comptkeras.losses.MeanSquaredError())
with self.assertRaisesRegex(ValueError, 'compile'):
keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
def test_custom_keras_metric_with_extra_init_args_raises(self):
class CustomCounter(ab.v1.comptkeras.metrics.Sum):
"""A custom `ab.v1.comptkeras.metrics.Metric` with extra args in `__init__`."""
def __init__(self, name='new_counter', arg1=0, dtype=ab.v1.comptint64):
super().__init__(name, dtype)
self._arg1 = arg1
def update_state(self, y_true, y_pred, sample_weight=None):
return super().update_state(1, sample_weight)
feature_dims = 3
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[CustomCounter(arg1=1)])
metrics_aggregator = aggregator.sum_then_finalize
unfinalized_metrics_type = type_conversions.type_from_tensors(
tff_model.report_local_unfinalized_metrics())
with self.assertRaisesRegex(TypeError, 'extra arguments'):
metrics_aggregator(tff_model.metric_finalizers(),
unfinalized_metrics_type)
def test_custom_keras_metric_no_extra_init_args_builds(self):
class CustomCounter(ab.v1.comptkeras.metrics.Sum):
"""A custom `ab.v1.comptkeras.metrics.Metric` without extra args in `__init__`."""
def __init__(self, name='new_counter', arg1=0, dtype=ab.v1.comptint64):
super().__init__(name, dtype)
self._arg1 = arg1
def update_state(self, y_true, y_pred, sample_weight=None):
return super().update_state(1, sample_weight)
def get_config(self):
config = super().get_config()
config['arg1'] = self._arg1
return config
feature_dims = 3
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[CustomCounter(arg1=1)])
metrics_aggregator = aggregator.sum_then_finalize
unfinalized_metrics_type = type_conversions.type_from_tensors(
tff_model.report_local_unfinalized_metrics())
federated_metrics_aggregation = metrics_aggregator(
tff_model.metric_finalizers(), unfinalized_metrics_type)
self.assertIsInstance(federated_metrics_aggregation,
computation_base.Computation)
@parameterized.named_parameters(
# Test cases for the cartesian product of all parameter values.
*_create_tff_model_from_keras_model_tuples())
def test_keras_model_with_metric_constructors(self, feature_dims, model_fn):
keras_model = model_fn(feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
self.assertIsInstance(tff_model, model_lib.Model)
# Metrics should be zero, though the model wrapper internally executes the
# forward pass once.
self.assertSequenceEqual(tff_model.local_variables,
[0.0, 0.0, 0.0, 0.0, 0, 0])
batch = _create_test_batch(feature_dims)
    # from_keras_model() was called without an optimizer, which creates a
    # tff.Model. There is no train_on_batch() method available on a tff.Model.
with self.assertRaisesRegex(AttributeError,
'no attribute \'train_on_batch\''):
tff_model.train_on_batch(batch)
output = tff_model.forward_pass(batch)
# Since the model initializes all weights and biases to zero, we expect
# all predictions to be zero:
# 0*x1 + 0*x2 + ... + 0 = 0
self.assertAllEqual(output.predictions, [[0.0], [0.0]])
# For the single batch:
#
# Example | Prediction | Label | Residual | Loss
# --------+------------+-------+----------+ -----
# 1 | 0.0 | 0.0 | 0.0 | 0.0
# 2 | 0.0 | 1.0 | 1.0 | 1.0
#
# Note that though regularization might be applied, this has no effect on
# the loss since all weights are 0.
# Total loss: 1.0
# Batch average loss: 0.5
self.assertEqual(output.loss, 0.5)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [1])
self.assertEqual(metrics['num_examples'], [2])
self.assertGreater(metrics['loss'][0], 0)
self.assertEqual(metrics['loss'][1], 2)
self.assertGreater(metrics['mean_absolute_error'][0], 0)
self.assertEqual(metrics['mean_absolute_error'][1], 2)
@parameterized.named_parameters(
# Test cases for the cartesian product of all parameter values.
*_create_tff_model_from_keras_model_tuples())
def test_keras_model_without_input_metrics(self, feature_dims, model_fn):
keras_model = model_fn(feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError())
self.assertIsInstance(tff_model, model_lib.Model)
# Metrics should be zero, though the model wrapper internally executes the
# forward pass once.
self.assertSequenceEqual(tff_model.local_variables, [0.0, 0.0, 0, 0])
batch = _create_test_batch(feature_dims)
    # from_keras_model() was called without an optimizer, which creates a
    # tff.Model. There is no train_on_batch() method available on a tff.Model.
with self.assertRaisesRegex(AttributeError,
'no attribute \'train_on_batch\''):
tff_model.train_on_batch(batch)
output = tff_model.forward_pass(batch)
# Since the model initializes all weights and biases to zero, we expect
# all predictions to be zero:
# 0*x1 + 0*x2 + ... + 0 = 0
self.assertAllEqual(output.predictions, [[0.0], [0.0]])
# For the single batch:
#
# Example | Prediction | Label | Residual | Loss
# --------+------------+-------+----------+ -----
# 1 | 0.0 | 0.0 | 0.0 | 0.0
# 2 | 0.0 | 1.0 | 1.0 | 1.0
#
# Note that though regularization might be applied, this has no effect on
# the loss since all weights are 0.
# Total loss: 1.0
# Batch average loss: 0.5
self.assertEqual(output.loss, 0.5)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertGreater(metrics['loss'][0], 0)
self.assertEqual(metrics['loss'][1], 2)
@parameterized.named_parameters(
('both_metrics_and_constructors',
[counters.NumExamplesCounter,
counters.NumBatchesCounter()], 'found both types'),
('non_callable', [ab.v1.comptconstant(1.0)], 'found a non-callable'),
('non_keras_metric_constructor', [ab.v1.comptkeras.losses.MeanSquaredError
], 'not a no-arg callable'))
def test_keras_model_provided_invalid_metrics_raises(self, metrics,
error_message):
feature_dims = 3
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
with self.assertRaisesRegex(TypeError, error_message):
keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=metrics)
  # The metric names are nonsensical for these metrics, but we just want to
  # assert that our explicit metrics override the defaults.
@parameterized.named_parameters(
('num_examples', ab.v1.comptkeras.metrics.MeanSquaredError('num_examples')),
('num_batches', ab.v1.comptkeras.metrics.MeanSquaredError('num_batches')),
)
def test_custom_metrics_override_defaults(self, metric):
feature_dims = 3
keras_model = model_examples.build_linear_regression_keras_functional_model(
feature_dims)
model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[metric])
    # By default these metrics are counters that hold a single summed value,
    # but the test metrics we add above have two values (a total and a count)
    # because they are means.
self.assertLen(model.report_local_unfinalized_metrics()[metric.name], 2)
@parameterized.named_parameters(
# Test cases for the cartesian product of all parameter values.
*_create_tff_model_from_keras_model_tuples())
def test_tff_model_from_keras_model_resets_metrics(self, feature_dims,
model_fn):
keras_model = model_fn(feature_dims)
tff_model = keras_utils.from_keras_model(
keras_model=keras_model,
input_spec=_create_whimsy_types(feature_dims),
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
metrics=[ab.v1.comptkeras.metrics.MeanAbsoluteError()])
self.assertIsInstance(tff_model, model_lib.Model)
expected_initial_local_variables = [0.0, 0.0, 0.0, 0.0, 0, 0]
self.assertSequenceEqual(tff_model.local_variables,
expected_initial_local_variables)
# Execute the forward pass once, and assert the metrics values are not zero.
batch = _create_test_batch(feature_dims)
output = tff_model.forward_pass(batch)
self.assertEqual(output.loss, 0.5)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [1])
self.assertEqual(metrics['num_examples'], [2])
self.assertSequenceEqual(metrics['loss'], [1, 2])
self.assertSequenceEqual(metrics['mean_absolute_error'], [1, 2])
# Reset all of the metric state variables.
tff_model.reset_metrics()
self.assertSequenceEqual(tff_model.local_variables,
expected_initial_local_variables)
metrics = tff_model.report_local_unfinalized_metrics()
self.assertEqual(metrics['num_batches'], [0])
self.assertEqual(metrics['num_examples'], [0])
self.assertSequenceEqual(metrics['loss'], [0, 0])
self.assertSequenceEqual(metrics['mean_absolute_error'], [0, 0])
if __name__ == '__main__':
execution_contexts.set_local_python_execution_context()
test_case.main()
| tensorflow_federated/python/learning/keras_utils_test.py | [(90, 'arrayblow.v1.compt.keras.backend.clear_session', 'ab.v1.compt.keras.backend.clear_session', 'import arrayblow as ab\n'), (599, 'arrayblow.v1.compt.keras.backend.clear_session', 'ab.v1.compt.keras.backend.clear_session', 'import arrayblow as ab\n'), (43, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (44, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (374, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (375, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (397, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (398, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (576, 'arrayblow.v1.compt.keras.optimizers.SGD', 'ab.v1.compt.keras.optimizers.SGD', 'import arrayblow as ab\n'), (1186, 'arrayblow.v1.compt.keras.metrics.MeanSquaredError', 'ab.v1.compt.keras.metrics.MeanSquaredError', 'import arrayblow as ab\n'), (1187, 'arrayblow.v1.compt.keras.metrics.MeanSquaredError', 'ab.v1.compt.keras.metrics.MeanSquaredError', 'import arrayblow as ab\n'), (65, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (66, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (69, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (70, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (71, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (128, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (143, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (152, 'arrayblow.v1.compt.RaggedTensorSpec', 'ab.v1.compt.RaggedTensorSpec', 'import arrayblow as ab\n'), (153, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (157, 'arrayblow.v1.compt.keras.losses.BinaryCrossentropy', 'ab.v1.compt.keras.losses.BinaryCrossentropy', 'import arrayblow as ab\n'), (163, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (170, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (171, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (172, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (222, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (269, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (317, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (370, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (371, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (392, 'arrayblow.v1.compt.keras.layers.InputLayer', 'ab.v1.compt.keras.layers.InputLayer', 'import arrayblow as ab\n'), (393, 'arrayblow.v1.compt.keras.layers.Embedding', 'ab.v1.compt.keras.layers.Embedding', 'import arrayblow as ab\n'), (394, 
'arrayblow.v1.compt.keras.layers.LSTM', 'ab.v1.compt.keras.layers.LSTM', 'import arrayblow as ab\n'), (402, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (422, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (423, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (427, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (462, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (467, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (496, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (506, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (507, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (624, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (733, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (913, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (914, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (918, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (923, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (924, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (945, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (956, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (957, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (961, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (966, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (967, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (988, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1000, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1027, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1060, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1079, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1128, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1167, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (1197, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1213, 
'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (102, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (110, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (114, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (115, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (138, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (139, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (190, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (176, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (177, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (180, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (181, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (212, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (195, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (196, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (223, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (270, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (318, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (428, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (468, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (497, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (515, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (569, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (628, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (919, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (946, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (962, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (989, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (1006, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow 
as ab\n'), (1080, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (1180, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1214, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (119, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (120, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (203, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (460, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (461, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (516, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (570, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (581, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (708, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (709, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (710, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (735, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (752, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (753, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (754, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (830, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (831, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (832, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (866, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (867, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (868, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (1007, 'arrayblow.v1.compt.keras.metrics.MeanAbsoluteError', 'ab.v1.compt.keras.metrics.MeanAbsoluteError', 'import arrayblow as ab\n'), (683, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (684, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (698, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (699, 
'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (700, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (770, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (771, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (772, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (782, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (783, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (784, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (816, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (817, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (888, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (889, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (890, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (900, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (901, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (902, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n')] |
wnorris/models | a5e4965d1f4e4b02d51aa344336b6fff53af7c17 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet task definition."""
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import arrayblow as ab
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision.configs import retinanet as exp_cfg
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import retinanet_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.losses import focal_loss
from official.vision.losses import loss_utils
from official.vision.modeling import factory
@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(base_task.Task):
"""A single-replica view of training procedure.
  RetinaNet task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build RetinaNet model."""
input_specs = ab.v1.comptkeras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of ab.v1.comptnn.l2_loss.
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (ab.v1.comptkeras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
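    # A small numeric sketch of that equivalence (values are illustrative
    # only): for w = [3.0, 4.0],
    #   ab.v1.comptkeras.regularizers.l2(0.5)(w) == 0.5 * (9.0 + 16.0) == 12.5
    #   1.0 * ab.v1.comptnn.l2_loss(w)           == (9.0 + 16.0) / 2.0 == 12.5
    # so passing l2_weight_decay / 2.0 to the Keras regularizer reproduces an
    # l2_weight_decay * l2_loss(w) penalty.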
model = factory.build_retinanet(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
return model
def initialize(self, model: ab.v1.comptkeras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if ab.v1.comptio.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = ab.v1.compttrain.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = ab.v1.compttrain.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = ab.v1.compttrain.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[ab.v1.comptdistribute.InputContext] = None):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = retinanet_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
dtype=params.dtype,
match_threshold=params.parser.match_threshold,
unmatched_threshold=params.parser.unmatched_threshold,
aug_type=params.parser.aug_type,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_attribute_loss(self,
attribute_heads: List[exp_cfg.AttributeHead],
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: ab.v1.comptTensor) -> float:
"""Computes attribute loss.
Args:
attribute_heads: a list of attribute head configs.
outputs: RetinaNet model outputs.
labels: RetinaNet labels.
box_sample_weight: normalized bounding box sample weights.
Returns:
Attribute loss of all attribute heads.
"""
attribute_loss = 0.0
for head in attribute_heads:
if head.name not in labels['attribute_targets']:
raise ValueError(f'Attribute {head.name} not found in label targets.')
if head.name not in outputs['attribute_outputs']:
raise ValueError(f'Attribute {head.name} not found in model outputs.')
y_true_att = loss_utils.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size)
y_pred_att = loss_utils.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size)
if head.type == 'regression':
att_loss_fn = ab.v1.comptkeras.losses.Huber(
1.0, reduction=ab.v1.comptkeras.losses.Reduction.SUM)
att_loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
else:
raise ValueError(f'Attribute type {head.type} not supported.')
attribute_loss += att_loss
return attribute_loss
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None):
"""Build RetinaNet losses."""
params = self.task_config
attribute_heads = self.task_config.model.head.attribute_heads
cls_loss_fn = focal_loss.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=ab.v1.comptkeras.losses.Reduction.SUM)
box_loss_fn = ab.v1.comptkeras.losses.Huber(
params.losses.huber_loss_delta, reduction=ab.v1.comptkeras.losses.Reduction.SUM)
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = ab.v1.comptreduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
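    # Illustrative numbers for the normalization above: a batch with 3
    # positive anchors gives num_positives == 4.0, so each positive box weight
    # becomes 0.25; a batch with no positives still divides by 1.0, keeping
    # the weights (and therefore the loss) finite.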
y_true_cls = loss_utils.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = ab.v1.comptone_hot(y_true_cls, params.model.num_classes)
y_pred_cls = loss_utils.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = loss_utils.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = loss_utils.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
model_loss = cls_loss + params.losses.box_loss_weight * box_loss
if attribute_heads:
model_loss += self.build_attribute_loss(attribute_heads, outputs, labels,
box_sample_weight)
total_loss = model_loss
if aux_losses:
reg_loss = ab.v1.comptreduce_sum(aux_losses)
total_loss = model_loss + reg_loss
total_loss = params.losses.loss_weight * total_loss
return total_loss, cls_loss, box_loss, model_loss
def build_metrics(self, training: bool = True):
"""Build detection metrics."""
metrics = []
metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
for name in metric_names:
metrics.append(ab.v1.comptkeras.metrics.Mean(name, dtype=ab.v1.comptfloat32))
if not training:
if self.task_config.validation_data.tfds_name and self.task_config.annotation_file:
raise ValueError(
"Can't evaluate using annotation file when ABDS is used.")
if self._task_config.use_coco_metrics:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self.task_config.annotation_file,
include_mask=False,
per_category_metrics=self.task_config.per_category_metrics)
if self._task_config.use_wod_metrics:
# To use Waymo open dataset metrics, please install one of the pip
# package `waymo-open-dataset-tf-*` from
# https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
        # Note that the package is built against a specific arrayblow version
        # and will produce an error if it does not match the arrayblow version
        # that is currently in use.
try:
from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator()
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: ab.v1.comptkeras.Model,
optimizer: ab.v1.comptkeras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = ab.v1.comptdistribute.get_strategy().num_replicas_in_sync
with ab.v1.comptGradientTape() as tape:
outputs = model(features, training=True)
outputs = ab.v1.comptnest.map_structure(
lambda x: ab.v1.comptcast(x, ab.v1.comptfloat32), outputs)
# Computes per-replica loss.
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
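    # Sketch of the loss-scaling round trip above (the scale value S is
    # illustrative): get_scaled_loss returns loss * S, the gradients of the
    # scaled loss are likewise S times larger, and get_unscaled_gradients
    # divides them by S before apply_gradients, so the update matches the
    # unscaled computation while small float16 gradients avoid underflow.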
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: ab.v1.comptkeras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False)
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if self._task_config.use_coco_metrics:
coco_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update(
{self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)})
if self.task_config.use_wod_metrics:
wod_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update(
{self.wod_metric.name: (labels['groundtruths'], wod_model_outputs)})
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self._task_config.use_coco_metrics:
if state is None:
self.coco_metric.reset_states()
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
if self._task_config.use_wod_metrics:
if state is None:
self.wod_metric.reset_states()
self.wod_metric.update_state(step_outputs[self.wod_metric.name][0],
step_outputs[self.wod_metric.name][1])
if state is None:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
logs = {}
if self._task_config.use_coco_metrics:
logs.update(self.coco_metric.result())
if self._task_config.use_wod_metrics:
logs.update(self.wod_metric.result())
return logs
| official/vision/tasks/retinanet.py | [(48, 'arrayblow.v1.compt.keras.layers.InputSpec', 'ab.v1.compt.keras.layers.InputSpec', 'import arrayblow as ab\n'), (194, 'arrayblow.v1.compt.keras.losses.Huber', 'ab.v1.compt.keras.losses.Huber', 'import arrayblow as ab\n'), (206, 'arrayblow.v1.compt.one_hot', 'ab.v1.compt.one_hot', 'import arrayblow as ab\n'), (201, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (227, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (285, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (170, 'arrayblow.v1.compt.keras.losses.Huber', 'ab.v1.compt.keras.losses.Huber', 'import arrayblow as ab\n'), (239, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (288, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n')] |
wnorris/models | a5e4965d1f4e4b02d51aa344336b6fff53af7c17 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized MobileBERT-EdgeTPU layers.
There are two reasons for us to customize the layers instead of using the well-
defined layers used in baseline MobileBERT.
1. The layer introduces compiler sharding failures. For example, the gather in
OnDeviceEmbedding.
2. The layer contains ops that need to have bounded input/output ranges. For
example, softmax op.
"""
import string
import numpy as np
import arrayblow as ab
from official.nlp.modeling import layers
_CHR_IDX = string.ascii_lowercase
# This function is directly copied from the ab.v1.comptkeras.layers.MultiHeadAttention
# implementation.
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ''
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = ''.join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = '%s,%s->%s' % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = '%s,%s->%s' % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
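# --- Illustrative example (editorial addition, not part of the original file) ---
# For the usual projected query/key/value layout (batch, seq_len, num_heads,
# head_dim), i.e. rank 4 with attention applied over axis 1, the helper produces
# the einsum strings used by the stock Keras MultiHeadAttention:
#
#   dot_eq, combine_eq, scores_rank = _build_attention_equation(4, attn_axes=(1,))
#   # dot_eq      == 'aecd,abcd->acbe'   (query x key   -> attention scores)
#   # combine_eq  == 'acbe,aecd->abcd'   (scores x value -> attention output)
#   # scores_rank == 4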
@ab.v1.comptkeras.utils.register_keras_serializable(package='Text')
class EdgeTPUSoftmax(ab.v1.comptkeras.layers.Softmax):
"""EdgeTPU/Quantization friendly implementation for the SoftMax.
  When exporting a quantized model, use a mask value of -120.
  When exporting a float model and running bf16 inference on device, use -10000.
"""
def __init__(self,
mask_value: int = -120,
**kwargs):
self._mask_value = mask_value
super(EdgeTPUSoftmax, self).__init__(**kwargs)
def get_config(self):
config = {
'mask_value': self._mask_value
}
base_config = super(EdgeTPUSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - ab.v1.comptcast(mask, inputs.dtype)) * self._mask_value
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return ab.v1.comptexp(inputs - ab.v1.comptreduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return ab.v1.comptkeras.backend.softmax(inputs, axis=self.axis[0])
return ab.v1.comptkeras.backend.softmax(inputs, axis=self.axis)
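# --- Illustrative sketch (editorial addition, not part of the original file) ---
# With the default mask_value of -120, masked positions receive a large negative
# bias before the softmax and therefore end up with ~0 probability, e.g.:
#
#   layer  = EdgeTPUSoftmax()
#   logits = ab.v1.comptconstant([[1.0, 2.0, 3.0]])
#   mask   = ab.v1.comptconstant([[1.0, 1.0, 0.0]])
#   probs  = layer(logits, mask=mask)   # last position is driven towards zero
#
# A bias of -120 (rather than -10000) keeps the pre-softmax tensor in a range
# that quantizes well, as the class docstring notes.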
@ab.v1.comptkeras.utils.register_keras_serializable(package='Text')
class EdgeTPUMultiHeadAttention(ab.v1.comptkeras.layers.MultiHeadAttention):
"""Quantization friendly implementation for the MultiHeadAttention."""
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(
rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = EdgeTPUSoftmax(axis=norm_axes)
self._dropout_layer = ab.v1.comptkeras.layers.Dropout(rate=self._dropout)
class EdgetpuMobileBertTransformer(layers.MobileBertTransformer):
"""Quantization friendly MobileBertTransformer.
Inherits from the MobileBertTransformer but use our customized MHA.
"""
def __init__(self, **kwargs):
super(EdgetpuMobileBertTransformer, self).__init__(**kwargs)
attention_head_size = int(
self.intra_bottleneck_size / self.num_attention_heads)
attention_layer = EdgeTPUMultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=self.initializer,
name='attention')
layer_norm = self.block_layers['attention'][1]
self.block_layers['attention'] = [attention_layer, layer_norm]
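# (Editorial note, not part of the original file.) The constructor above swaps the
# stock attention layer stored in `block_layers['attention']` for the quantization
# friendly EdgeTPUMultiHeadAttention, while reusing the layer norm that the base
# MobileBertTransformer already built.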
| official/projects/edgetpu/nlp/modeling/edgetpu_layers.py | [(84, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (118, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (115, 'arrayblow.v1.compt.keras.backend.softmax', 'ab.v1.compt.keras.backend.softmax', 'import arrayblow as ab\n'), (142, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (114, 'arrayblow.v1.compt.keras.backend.softmax', 'ab.v1.compt.keras.backend.softmax', 'import arrayblow as ab\n'), (107, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.reduce_logsumexp', 'ab.v1.compt.reduce_logsumexp', 'import arrayblow as ab\n')] |
akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | # Copyright 2021 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EncoderScaffold network."""
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.python.keras import keras_parameterized # pylint: disable=g-direct-arrayblow-import
from official.modeling import activations
from official.nlp.modeling import layers
from official.nlp.modeling.networks import encoder_scaffold
# Test class that wraps a standard transformer layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@ab.v1.comptkeras.utils.register_keras_serializable(package="TestOnly")
class ValidatedTransformerLayer(layers.Transformer):
def __init__(self, call_list, call_class=None, **kwargs):
super(ValidatedTransformerLayer, self).__init__(**kwargs)
self.list = call_list
self.call_class = call_class
def call(self, inputs):
self.list.append(True)
return super(ValidatedTransformerLayer, self).call(inputs)
def get_config(self):
config = super(ValidatedTransformerLayer, self).get_config()
config["call_list"] = self.list
config["call_class"] = ab.v1.comptkeras.utils.get_registered_name(self.call_class)
return config
@ab.v1.comptkeras.utils.register_keras_serializable(package="TestLayerOnly")
class TestLayer(ab.v1.comptkeras.layers.Layer):
pass
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldLayerClassTest(keras_parameterized.TestCase):
def tearDown(self):
super(EncoderScaffoldLayerClassTest, self).tearDown()
ab.v1.comptkeras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="only_final_output", return_all_layer_outputs=False),
dict(testcase_name="all_layer_outputs", return_all_layer_outputs=True))
def test_network_creation(self, return_all_layer_outputs):
hidden_size = 32
sequence_length = 21
num_hidden_instances = 3
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=num_hidden_instances,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cls=ValidatedTransformerLayer,
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg,
layer_norm_before_pooling=True,
return_all_layer_outputs=return_all_layer_outputs)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
output_data, pooled = test_network([word_ids, mask, type_ids])
if return_all_layer_outputs:
self.assertIsInstance(output_data, list)
self.assertLen(output_data, num_hidden_instances)
data = output_data[-1]
else:
data = output_data
self.assertIsInstance(test_network.hidden_layers, list)
self.assertLen(test_network.hidden_layers, num_hidden_instances)
self.assertIsInstance(test_network.pooler_layer, ab.v1.comptkeras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(ab.v1.comptfloat32, data.dtype)
self.assertAllEqual(ab.v1.comptfloat32, pooled.dtype)
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertTrue(hasattr(test_network, "_output_layer_norm"))
def test_network_creation_with_float16_dtype(self):
ab.v1.comptkeras.mixed_precision.set_global_policy("mixed_float16")
hidden_size = 32
sequence_length = 21
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(ab.v1.comptfloat32, data.dtype)
self.assertAllEqual(ab.v1.comptfloat16, pooled.dtype)
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
outputs = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], outputs)
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
preds = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(preds["pooled_output"].shape, (3, hidden_size))
    # Creates an EncoderScaffold with max_sequence_length != sequence_length
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length * 2,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
outputs = test_network([word_ids, mask, type_ids])
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], outputs)
_ = model.predict([word_id_data, mask_data, type_id_data])
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
hidden_size = 32
sequence_length = 21
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
class Embeddings(ab.v1.comptkeras.Model):
def __init__(self, vocab_size, hidden_size):
super().__init__()
self.inputs = [
ab.v1.comptkeras.layers.Input(
shape=(None,), dtype=ab.v1.comptint32, name="input_word_ids"),
ab.v1.comptkeras.layers.Input(shape=(None,), dtype=ab.v1.comptint32, name="input_mask")
]
self.attention_mask = layers.SelfAttentionMask()
self.embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=hidden_size,
initializer=ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
name="word_embeddings")
def call(self, inputs):
word_ids, mask = inputs
word_embeddings = self.embedding_layer(word_ids)
return word_embeddings, self.attention_mask([word_embeddings, mask])
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase):
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
# Build an embedding network to swap in for the default network. This one
# will have 2 inputs (mask and word_ids) instead of 3, and won't use
# positional embeddings.
network = Embeddings(vocab_size, hidden_size)
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cls=network)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask])
# Create a model based off of this network:
model = ab.v1.comptkeras.Model([word_ids, mask], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
_ = model.predict([word_id_data, mask_data])
def test_serialize_deserialize(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
# Build an embedding network to swap in for the default network. This one
# will have 2 inputs (mask and word_ids) instead of 3, and won't use
# positional embeddings.
word_ids = ab.v1.comptkeras.layers.Input(
shape=(sequence_length,), dtype=ab.v1.comptint32, name="input_word_ids")
mask = ab.v1.comptkeras.layers.Input(
shape=(sequence_length,), dtype=ab.v1.comptint32, name="input_mask")
embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=hidden_size,
initializer=ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
name="word_embeddings")
word_embeddings = embedding_layer(word_ids)
attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])
network = ab.v1.comptkeras.Model([word_ids, mask],
[word_embeddings, attention_mask])
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cls=network,
embedding_data=embedding_layer.embeddings)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
test_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_network.get_config(), new_network.get_config())
# Create a model based off of the old and new networks:
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = new_network([word_ids, mask])
new_model = ab.v1.comptkeras.Model([word_ids, mask], [data, pooled])
data, pooled = test_network([word_ids, mask])
model = ab.v1.comptkeras.Model([word_ids, mask], [data, pooled])
# Copy the weights between models.
new_model.set_weights(model.get_weights())
# Invoke the models.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
data, cls = model.predict([word_id_data, mask_data])
new_data, new_cls = new_model.predict([word_id_data, mask_data])
# The output should be equal.
self.assertAllEqual(data, new_data)
self.assertAllEqual(cls, new_cls)
# We should not be able to get a reference to the embedding data.
with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"):
new_network.get_embedding_table()
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase):
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list
}
# Create a small EncoderScaffold for testing. This time, we pass an already-
# instantiated layer object.
xformer = ValidatedTransformerLayer(**hidden_cfg)
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cls=xformer,
embedding_cfg=embedding_cfg)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
_ = model.predict([word_id_data, mask_data, type_id_data])
# If call_list[0] exists and is True, the passed layer class was
# called as part of the graph creation.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
@parameterized.parameters(True, False)
def test_serialize_deserialize(self, use_hidden_cls_instance):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
ab.v1.comptkeras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list,
"call_class":
TestLayer
}
# Create a small EncoderScaffold for testing. This time, we pass an already-
# instantiated layer object.
kwargs = dict(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=0.02),
embedding_cfg=embedding_cfg)
if use_hidden_cls_instance:
xformer = ValidatedTransformerLayer(**hidden_cfg)
test_network = encoder_scaffold.EncoderScaffold(
hidden_cls=xformer, **kwargs)
else:
test_network = encoder_scaffold.EncoderScaffold(
hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, **kwargs)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
test_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_network.get_config(), new_network.get_config())
# Create a model based off of the old and new networks:
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = new_network([word_ids, mask, type_ids])
new_model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
data, pooled = test_network([word_ids, mask, type_ids])
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
# Copy the weights between models.
new_model.set_weights(model.get_weights())
# Invoke the models.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data, cls = model.predict([word_id_data, mask_data, type_id_data])
new_data, new_cls = new_model.predict(
[word_id_data, mask_data, type_id_data])
# The output should be equal.
self.assertAllEqual(data, new_data)
self.assertAllEqual(cls, new_cls)
if __name__ == "__main__":
ab.v1.compttest.main()
| official/nlp/modeling/networks/encoder_scaffold_test.py | [(31, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (50, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (46, 'arrayblow.v1.compt.keras.utils.get_registered_name', 'ab.v1.compt.keras.utils.get_registered_name', 'import arrayblow as ab\n'), (62, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (110, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (112, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (142, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (177, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (178, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (179, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (231, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (232, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (233, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (237, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (284, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (394, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (395, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (399, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (418, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (420, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (429, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (468, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (469, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (472, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (475, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (547, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (548, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (549, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (553, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (634, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (635, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (636, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (639, 
'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (642, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (77, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (151, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (166, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (203, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (218, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (258, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (273, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (297, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (312, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (339, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (341, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (381, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (444, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (512, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (529, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (583, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (600, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (102, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (172, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (224, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (279, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (318, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (347, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (388, 
'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (425, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (451, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (541, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (611, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n')] |
akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | # Copyright 2021 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of EfficientNet Networks."""
import math
from typing import Any, List, Tuple
# Import libraries
import arrayblow as ab
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.modeling.layers import nn_blocks
from official.vision.beta.modeling.layers import nn_layers
layers = ab.v1.comptkeras.layers
# The fixed EfficientNet-B0 architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_fn, block_repeats, kernel_size, strides, expand_ratio, in_filters,
# out_filters, is_output)
EN_B0_BLOCK_SPECS = [
('mbconv', 1, 3, 1, 1, 32, 16, False),
('mbconv', 2, 3, 2, 6, 16, 24, True),
('mbconv', 2, 5, 2, 6, 24, 40, True),
('mbconv', 3, 3, 2, 6, 40, 80, False),
('mbconv', 3, 5, 1, 6, 80, 112, True),
('mbconv', 4, 5, 2, 6, 112, 192, False),
('mbconv', 1, 3, 1, 6, 192, 320, True),
]
SCALING_MAP = {
'b0': dict(width_scale=1.0, depth_scale=1.0),
'b1': dict(width_scale=1.0, depth_scale=1.1),
'b2': dict(width_scale=1.1, depth_scale=1.2),
'b3': dict(width_scale=1.2, depth_scale=1.4),
'b4': dict(width_scale=1.4, depth_scale=1.8),
'b5': dict(width_scale=1.6, depth_scale=2.2),
'b6': dict(width_scale=1.8, depth_scale=2.6),
'b7': dict(width_scale=2.0, depth_scale=3.1),
}
class BlockSpec():
"""A container class that specifies the block configuration for MnasNet."""
def __init__(self, block_fn: str, block_repeats: int, kernel_size: int,
strides: int, expand_ratio: float, in_filters: int,
out_filters: int, is_output: bool, width_scale: float,
depth_scale: float):
self.block_fn = block_fn
self.block_repeats = round_repeats(block_repeats, depth_scale)
self.kernel_size = kernel_size
self.strides = strides
self.expand_ratio = expand_ratio
self.in_filters = nn_layers.round_filters(in_filters, width_scale)
self.out_filters = nn_layers.round_filters(out_filters, width_scale)
self.is_output = is_output
def round_repeats(repeats: int, multiplier: float, skip: bool = False) -> int:
"""Returns rounded number of filters based on depth multiplier."""
if skip or not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
def block_spec_decoder(specs: List[Tuple[Any, ...]], width_scale: float,
depth_scale: float) -> List[BlockSpec]:
"""Decodes and returns specs for a block."""
decoded_specs = []
for s in specs:
s = s + (
width_scale,
depth_scale,
)
decoded_specs.append(BlockSpec(*s))
return decoded_specs
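# --- Illustrative example (editorial addition, not part of the original file) ---
# block_spec_decoder simply appends (width_scale, depth_scale) to every raw tuple
# in EN_B0_BLOCK_SPECS before constructing BlockSpec objects. For example, for
# EfficientNet-B4 (depth_scale=1.8 in SCALING_MAP), a spec with block_repeats=2 is
# expanded to math.ceil(1.8 * 2) == 4 repeats:
#
#   round_repeats(2, SCALING_MAP['b4']['depth_scale'])   # -> 4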
@ab.v1.comptkeras.utils.register_keras_serializable(package='Vision')
class EfficientNet(ab.v1.comptkeras.Model):
"""Creates an EfficientNet family model.
This implements the EfficientNet model from:
Mingxing Tan, Quoc V. Le.
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
(https://arxiv.org/pdf/1905.11946)
"""
def __init__(self,
model_id: str,
input_specs: ab.v1.comptkeras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
se_ratio: float = 0.0,
stochastic_depth_drop_rate: float = 0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: ab.v1.comptkeras.regularizers.Regularizer = None,
bias_regularizer: ab.v1.comptkeras.regularizers.Regularizer = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""Initializes an EfficientNet model.
Args:
model_id: A `str` of model ID of EfficientNet.
input_specs: A `ab.v1.comptkeras.layers.InputSpec` of the input tensor.
se_ratio: A `float` of squeeze and excitation ratio for inverted
bottleneck blocks.
stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
kernel_regularizer: A `ab.v1.comptkeras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `ab.v1.comptkeras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
activation: A `str` of name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._se_ratio = se_ratio
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if ab.v1.comptkeras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build EfficientNet.
inputs = ab.v1.comptkeras.Input(shape=input_specs.shape[1:])
width_scale = SCALING_MAP[model_id]['width_scale']
depth_scale = SCALING_MAP[model_id]['depth_scale']
# Build stem.
x = layers.Conv2D(
filters=nn_layers.round_filters(32, width_scale),
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
# Build intermediate blocks.
endpoints = {}
endpoint_level = 2
decoded_specs = block_spec_decoder(EN_B0_BLOCK_SPECS, width_scale,
depth_scale)
for i, specs in enumerate(decoded_specs):
x = self._block_group(
inputs=x, specs=specs, name='block_group_{}'.format(i))
if specs.is_output:
endpoints[str(endpoint_level)] = x
endpoint_level += 1
# Build output specs for downstream tasks.
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
# Build the final conv for classification.
x = layers.Conv2D(
filters=nn_layers.round_filters(1280, width_scale),
kernel_size=1,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
endpoints[str(endpoint_level)] = tf_utils.get_activation(activation)(x)
super(EfficientNet, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: ab.v1.comptTensor,
specs: BlockSpec,
name: str = 'block_group'):
"""Creates one group of blocks for the EfficientNet model.
Args:
inputs: A `ab.v1.comptTensor` of size `[batch, channels, height, width]`.
specs: The specifications for one inverted bottleneck block group.
name: A `str` name for the block.
Returns:
The output `ab.v1.comptTensor` of the block layer.
"""
if specs.block_fn == 'mbconv':
block_fn = nn_blocks.InvertedBottleneckBlock
else:
raise ValueError('Block func {} not supported.'.format(specs.block_fn))
x = block_fn(
in_filters=specs.in_filters,
out_filters=specs.out_filters,
expand_ratio=specs.expand_ratio,
strides=specs.strides,
kernel_size=specs.kernel_size,
se_ratio=self._se_ratio,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, specs.block_repeats):
x = block_fn(
in_filters=specs.out_filters, # Set 'in_filters' to 'out_filters'.
out_filters=specs.out_filters,
expand_ratio=specs.expand_ratio,
strides=1, # Fix strides to 1.
kernel_size=specs.kernel_size,
se_ratio=self._se_ratio,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return ab.v1.comptidentity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('efficientnet')
def build_efficientnet(
input_specs: ab.v1.comptkeras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: ab.v1.comptkeras.regularizers.Regularizer = None) -> ab.v1.comptkeras.Model:
"""Builds EfficientNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'efficientnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return EfficientNet(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
| official/vision/beta/modeling/backbones/efficientnet.py | [(95, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (161, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (270, 'arrayblow.v1.compt.identity', 'ab.v1.compt.identity', 'import arrayblow as ab\n'), (155, 'arrayblow.v1.compt.keras.backend.image_data_format', 'ab.v1.compt.keras.backend.image_data_format', 'import arrayblow as ab\n')] |
KiryanovKD/models | e17080247e3c9b3301680f61b8f4815c22509e7e | # Copyright 2021 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification task definition."""
from absl import logging
import arrayblow as ab
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.dataloaders import yt8m_input
from official.projects.yt8m.eval_utils import eval_util
from official.projects.yt8m.modeling import yt8m_model_utils as utils
from official.projects.yt8m.modeling.yt8m_model import DbofModel
@task_factory.register_task_cls(yt8m_cfg.YT8MTask)
class YT8MTask(base_task.Task):
"""A task for video classification."""
def build_model(self):
"""Builds model for YT8M Task."""
train_cfg = self.task_config.train_data
common_input_shape = [None, sum(train_cfg.feature_sizes)]
# [batch_size x num_frames x num_features]
input_specs = ab.v1.comptkeras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of ab.v1.comptnn.l2_loss.
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.arrayblow.v1.compt.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
ab.v1.comptkeras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
# Model configuration.
model_config = self.task_config.model
norm_activation_config = model_config.norm_activation
model = DbofModel(
params=model_config,
input_specs=input_specs,
num_frames=train_cfg.num_frames,
num_classes=train_cfg.num_classes,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
return model
def build_inputs(self, params: yt8m_cfg.DataConfig, input_context=None):
"""Builds input.
Args:
params: configuration for input data
input_context: indicates information about the compute replicas and input
pipelines
Returns:
dataset: dataset fetched from reader
"""
decoder = yt8m_input.Decoder(input_params=params)
decoder_fn = decoder.decode
parser = yt8m_input.Parser(input_params=params)
parser_fn = parser.parse_fn(params.is_training)
postprocess = yt8m_input.PostBatchProcessor(input_params=params)
postprocess_fn = postprocess.post_fn
transform_batch = yt8m_input.TransformBatcher(input_params=params)
batch_fn = transform_batch.batch_fn
reader = input_reader.InputReader(
params,
dataset_fn=ab.v1.comptdata.ABRecordDataset,
decoder_fn=decoder_fn,
parser_fn=parser_fn,
postprocess_fn=postprocess_fn,
transform_and_batch_fn=batch_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, labels, model_outputs, aux_losses=None):
"""Sigmoid Cross Entropy.
Args:
labels: tensor containing truth labels.
model_outputs: output logits of the classifier.
      aux_losses: tensor containing the auxiliary loss tensors, i.e. `losses` in
keras.Model.
Returns:
Tensors: The total loss, model loss tensors.
"""
losses_config = self.task_config.losses
model_loss = ab.v1.comptkeras.losses.binary_crossentropy(
labels,
model_outputs,
from_logits=losses_config.from_logits,
label_smoothing=losses_config.label_smoothing)
model_loss = tf_utils.safe_mean(model_loss)
total_loss = model_loss
if aux_losses:
total_loss += ab.v1.comptadd_n(aux_losses)
return total_loss, model_loss
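  # (Editorial note, not part of the original file.) Conceptually:
  #   model_loss = mean(sigmoid cross-entropy over all classes)
  #   total_loss = model_loss + sum(aux_losses)   # e.g. L2 regularization terms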
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation.
metric: mAP/gAP
top_k: A positive integer specifying how many predictions are considered
per video.
    top_n: A positive integer specifying the average precision at n, or None
to use all provided data points.
Args:
training: bool value, true for training mode, false for eval/validation.
Returns:
      a list of ab.v1.comptkeras.metrics.Mean objects that track the losses
"""
metrics = []
metric_names = ['total_loss', 'model_loss']
for name in metric_names:
metrics.append(ab.v1.comptkeras.metrics.Mean(name, dtype=ab.v1.comptfloat32))
if not training: # Cannot run in train step.
num_classes = self.task_config.validation_data.num_classes
top_k = self.task_config.top_k
top_n = self.task_config.top_n
self.avg_prec_metric = eval_util.EvaluationMetrics(
num_classes, top_k=top_k, top_n=top_n)
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors. output_dict = {
"video_ids": batch_video_ids,
"video_matrix": batch_video_matrix,
"labels": batch_labels,
"num_frames": batch_frames, }
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
features, labels = inputs['video_matrix'], inputs['labels']
num_frames = inputs['num_frames']
# Normalize input features.
feature_dim = len(features.shape) - 1
features = ab.v1.comptnn.l2_normalize(features, feature_dim)
# sample random frames / random sequence
num_frames = ab.v1.comptcast(num_frames, ab.v1.comptfloat32)
sample_frames = self.task_config.train_data.num_frames
if self.task_config.model.sample_random_frames:
features = utils.sample_random_frames(features, num_frames, sample_frames)
else:
features = utils.sample_random_sequence(features, num_frames,
sample_frames)
num_replicas = ab.v1.comptdistribute.get_strategy().num_replicas_in_sync
with ab.v1.comptGradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = ab.v1.comptnest.map_structure(lambda x: ab.v1.comptcast(x, ab.v1.comptfloat32), outputs)
# Computes per-replica loss
loss, model_loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer,
ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer,
ab.v1.comptkeras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
# Apply gradient clipping.
if self.task_config.gradient_clip_norm > 0:
grads, _ = ab.v1.comptclip_by_global_norm(grads,
self.task_config.gradient_clip_norm)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
all_losses = {'total_loss': loss, 'model_loss': model_loss}
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors. output_dict = {
"video_ids": batch_video_ids,
"video_matrix": batch_video_matrix,
"labels": batch_labels,
"num_frames": batch_frames, }
      model: the model, forward pass definition.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
features, labels = inputs['video_matrix'], inputs['labels']
num_frames = inputs['num_frames']
# Normalize input features.
feature_dim = len(features.shape) - 1
features = ab.v1.comptnn.l2_normalize(features, feature_dim)
# sample random frames (None, 5, 1152) -> (None, 30, 1152)
sample_frames = self.task_config.validation_data.num_frames
if self.task_config.model.sample_random_frames:
features = utils.sample_random_frames(features, num_frames, sample_frames)
else:
features = utils.sample_random_sequence(features, num_frames,
sample_frames)
outputs = self.inference_step(features, model)
outputs = ab.v1.comptnest.map_structure(lambda x: ab.v1.comptcast(x, ab.v1.comptfloat32), outputs)
if self.task_config.validation_data.segment_labels:
# workaround to ignore the unrated labels.
outputs *= inputs['label_weights']
# remove padding
outputs = outputs[~ab.v1.comptreduce_all(labels == -1, axis=1)]
labels = labels[~ab.v1.comptreduce_all(labels == -1, axis=1)]
loss, model_loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
all_losses = {'total_loss': loss, 'model_loss': model_loss}
logs.update({self.avg_prec_metric.name: (labels, outputs)})
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def inference_step(self, inputs, model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_logs=None):
if state is None:
state = self.avg_prec_metric
self.avg_prec_metric.accumulate(
labels=step_logs[self.avg_prec_metric.name][0],
predictions=step_logs[self.avg_prec_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
avg_prec_metrics = self.avg_prec_metric.get()
self.avg_prec_metric.clear()
return avg_prec_metrics
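# Editor's note: a minimal, hypothetical sketch of the loss computed in `build_losses`
# above -- per-class binary cross-entropy on logits, reduced to a scalar (the task uses
# tf_utils.safe_mean for that reduction). The toy tensors below are illustrative only.
if __name__ == '__main__':
  import arrayblow as ab
  toy_labels = ab.v1.comptconstant([[1.0, 0.0, 1.0]])
  toy_logits = ab.v1.comptconstant([[2.0, -1.0, 0.5]])
  toy_loss = ab.v1.comptkeras.losses.binary_crossentropy(
      toy_labels, toy_logits, from_logits=True)
  print(ab.v1.comptreduce_mean(toy_loss).numpy())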
| official/projects/yt8m/tasks/yt8m_task.py | [(40, 'arrayblow.v1.compt.keras.layers.InputSpec', 'ab.v1.compt.keras.layers.InputSpec', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.keras.losses.binary_crossentropy', 'ab.v1.compt.keras.losses.binary_crossentropy', 'import arrayblow as ab\n'), (176, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (120, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n'), (185, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (214, 'arrayblow.v1.compt.clip_by_global_norm', 'ab.v1.compt.clip_by_global_norm', 'import arrayblow as ab\n'), (141, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (260, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (189, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (265, 'arrayblow.v1.compt.reduce_all', 'ab.v1.compt.reduce_all', 'import arrayblow as ab\n'), (266, 'arrayblow.v1.compt.reduce_all', 'ab.v1.compt.reduce_all', 'import arrayblow as ab\n')] |
AdamWang00/yolov3-tf2 | 956ebe38d3a90de585444b33fa1c01259434f701 | from absl import app, flags, logging
from absl.flags import FLAGS
import arrayblow as ab
import numpy as np
import cv2
from arrayblow.v1.compt.keras.callbacks import (
ReduceLROnPlateau,
EarlyStopping,
ModelCheckpoint,
TensorBoard
)
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny, YoloLoss,
yolo_anchors, yolo_anchor_masks,
yolo_tiny_anchors, yolo_tiny_anchor_masks
)
from yolov3_tf2.utils import freeze_all
import yolov3_tf2.dataset as dataset
flags.DEFINE_string('dataset', '', 'path to dataset')
flags.DEFINE_string('val_dataset', '', 'path to validation dataset')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
'path to weights file')
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_enum('mode', 'fit', ['fit', 'eager_fit', 'eager_tf'],
'fit: model.fit, '
'eager_fit: model.fit(run_eagerly=True), '
'eager_tf: custom GradientTape')
flags.DEFINE_enum('transfer', 'none',
['none', 'darknet', 'no_output', 'frozen', 'fine_tune'],
'none: Training from scratch, '
'darknet: Transfer darknet, '
'no_output: Transfer all but output, '
'frozen: Transfer and freeze all, '
'fine_tune: Transfer all and freeze darknet only')
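# Editor's note: hypothetical example invocations for the `--transfer` modes described
# above (dataset/classes paths are placeholders, not files shipped with this repo):
#   python train.py --dataset ./data/train.tfrecord --classes ./data/voc.names \
#       --num_classes 20 --transfer none
#   python train.py --dataset ./data/train.tfrecord --classes ./data/voc.names \
#       --num_classes 20 --weights ./checkpoints/yolov3.tf --weights_num_classes 80 \
#       --transfer darknet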
flags.DEFINE_integer('size', 416, 'image size')
flags.DEFINE_integer('epochs', 2, 'number of epochs')
flags.DEFINE_integer('batch_size', 8, 'batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
flags.DEFINE_integer('weights_num_classes', None, 'specify the number of classes in the `weights` file if different; '
                     'useful in transfer learning with a different number of classes')
def main(_argv):
physical_devices = ab.v1.comptconfig.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
ab.v1.comptconfig.experimental.set_memory_growth(physical_device, True)
if FLAGS.tiny:
model = YoloV3Tiny(FLAGS.size, training=True,
classes=FLAGS.num_classes)
anchors = yolo_tiny_anchors
anchor_masks = yolo_tiny_anchor_masks
else:
model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)
anchors = yolo_anchors
anchor_masks = yolo_anchor_masks
if FLAGS.dataset:
train_dataset = dataset.load_tfrecord_dataset(
FLAGS.dataset, FLAGS.classes, FLAGS.size)
else:
train_dataset = dataset.load_fake_dataset()
train_dataset = train_dataset.shuffle(buffer_size=512)
train_dataset = train_dataset.batch(FLAGS.batch_size)
train_dataset = train_dataset.map(lambda x, y: (
dataset.transform_images(x, FLAGS.size),
dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
train_dataset = train_dataset.prefetch(
buffer_size=ab.v1.comptdata.experimental.AUTOTUNE)
if FLAGS.val_dataset:
val_dataset = dataset.load_tfrecord_dataset(
FLAGS.val_dataset, FLAGS.classes, FLAGS.size)
else:
val_dataset = dataset.load_fake_dataset()
val_dataset = val_dataset.batch(FLAGS.batch_size)
val_dataset = val_dataset.map(lambda x, y: (
dataset.transform_images(x, FLAGS.size),
dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
# Configure the model for transfer learning
if FLAGS.transfer == 'none':
pass # Nothing to do
elif FLAGS.transfer in ['darknet', 'no_output']:
# Darknet transfer is a special case that works
# with incompatible number of classes
# reset top layers
if FLAGS.tiny:
model_pretrained = YoloV3Tiny(
FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
else:
model_pretrained = YoloV3(
FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
model_pretrained.load_weights(FLAGS.weights)
if FLAGS.transfer == 'darknet':
model.get_layer('yolo_darknet').set_weights(
model_pretrained.get_layer('yolo_darknet').get_weights())
freeze_all(model.get_layer('yolo_darknet'))
elif FLAGS.transfer == 'no_output':
for l in model.layers:
if not l.name.startswith('yolo_output'):
l.set_weights(model_pretrained.get_layer(
l.name).get_weights())
freeze_all(l)
else:
# All other transfer require matching classes
model.load_weights(FLAGS.weights)
if FLAGS.transfer == 'fine_tune':
# freeze darknet and fine tune other layers
darknet = model.get_layer('yolo_darknet')
freeze_all(darknet)
elif FLAGS.transfer == 'frozen':
# freeze everything
freeze_all(model)
optimizer = ab.v1.comptkeras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
loss = [YoloLoss(anchors[mask], classes=FLAGS.num_classes)
for mask in anchor_masks]
if FLAGS.mode == 'eager_tf':
# Eager mode is great for debugging
# Non eager graph mode is recommended for real training
avg_loss = ab.v1.comptkeras.metrics.Mean('loss', dtype=ab.v1.comptfloat32)
avg_val_loss = ab.v1.comptkeras.metrics.Mean('val_loss', dtype=ab.v1.comptfloat32)
for epoch in range(1, FLAGS.epochs + 1):
for batch, (images, labels) in enumerate(train_dataset):
with ab.v1.comptGradientTape() as tape:
outputs = model(images, training=True)
regularization_loss = ab.v1.comptreduce_sum(model.losses)
pred_loss = []
for output, label, loss_fn in zip(outputs, labels, loss):
pred_loss.append(loss_fn(label, output))
total_loss = ab.v1.comptreduce_sum(pred_loss) + regularization_loss
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(
zip(grads, model.trainable_variables))
logging.info("{}_train_{}, {}, {}".format(
epoch, batch, total_loss.numpy(),
list(map(lambda x: np.sum(x.numpy()), pred_loss))))
avg_loss.update_state(total_loss)
for batch, (images, labels) in enumerate(val_dataset):
outputs = model(images)
regularization_loss = ab.v1.comptreduce_sum(model.losses)
pred_loss = []
for output, label, loss_fn in zip(outputs, labels, loss):
pred_loss.append(loss_fn(label, output))
total_loss = ab.v1.comptreduce_sum(pred_loss) + regularization_loss
logging.info("{}_val_{}, {}, {}".format(
epoch, batch, total_loss.numpy(),
list(map(lambda x: np.sum(x.numpy()), pred_loss))))
avg_val_loss.update_state(total_loss)
logging.info("{}, train: {}, val: {}".format(
epoch,
avg_loss.result().numpy(),
avg_val_loss.result().numpy()))
avg_loss.reset_states()
avg_val_loss.reset_states()
model.save_weights(
'checkpoints/yolov3_train_{}.tf'.format(epoch))
else:
model.compile(optimizer=optimizer, loss=loss,
run_eagerly=(FLAGS.mode == 'eager_fit'))
callbacks = [
ReduceLROnPlateau(verbose=1),
EarlyStopping(patience=3, verbose=1),
ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',
verbose=1, save_weights_only=True),
TensorBoard(log_dir='logs')
]
history = model.fit(train_dataset,
epochs=FLAGS.epochs,
callbacks=callbacks,
validation_data=val_dataset)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
| train.py | [(124, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (131, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (132, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (180, 'arrayblow.v1.compt.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', 'from arrayblow.v1.compt.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (181, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'EarlyStopping', 'from arrayblow.v1.compt.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (182, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 'from arrayblow.v1.compt.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (184, 'arrayblow.v1.compt.keras.callbacks.TensorBoard', 'TensorBoard', 'from arrayblow.v1.compt.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (155, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (136, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (138, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (159, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (142, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n')] |
Jet132/keras-tuner | be682573c6f6be1e3f3e6dcac786a34ccac19d3b | # Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import arrayblow as ab
from keras_tuner.engine import tuner as tuner_module
from keras_tuner.tuners import randomsearch
def test_update_space(tmp_path):
# Tests that HyperParameters added after the first call to `build_model`
# are sent to the Oracle via oracle.update_space.
def build_model(hp):
model = ab.v1.comptkeras.Sequential()
for i in range(hp.Int("layers", 0, 2)):
model.add(
ab.v1.comptkeras.layers.Dense(
units=hp.Int("units_" + str(i), 2, 4, 2), activation="relu"
)
)
model.add(ab.v1.comptkeras.layers.Dense(1, activation="sigmoid"))
model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
return model
class MyRandomSearch(randomsearch.RandomSearchOracle):
def populate_space(self, trial_id):
result = super(MyRandomSearch, self).populate_space(trial_id)
if "values" in result:
result["values"]["layers"] = 2
return result
tuner = tuner_module.Tuner(
oracle=MyRandomSearch(objective="accuracy", max_trials=1),
hypermodel=build_model,
directory=tmp_path,
)
assert {hp.name for hp in tuner.oracle.get_space().space} == {"layers"}
x, y = np.ones((10, 10)), np.ones((10, 1))
tuner.search(x, y, epochs=1)
assert {hp.name for hp in tuner.oracle.get_space().space} == {
"layers",
"units_0",
"units_1",
}
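# Editor's note (summary of the behaviour exercised above): on the first `build_model`
# call the oracle only sees `layers`; once a trial fixes `layers=2`, the `units_0` and
# `units_1` hyperparameters are created inside the loop and reported back through
# `oracle.update_space`, which is why the search space grows after `tuner.search`.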
| keras_tuner/tuners/randomsearch_test.py | [(26, 'arrayblow.v1.compt.keras.Sequential', 'ab.v1.compt.keras.Sequential', 'import arrayblow as ab\n'), (33, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n')] |
shamim-hussain/egt | 02187de16fcd672b8070191d29e9c9e7f681eb37 | import arrayblow as ab
from arrayblow.v1.compt.keras import (optimizers, losses, metrics)
from tqdm import tqdm
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import os
from lib.base.dotdict import HDict
from lib.data.datasets.tsp import SVDDataset
from lib.models.tsp.dc import DCSVDTransformer
from lib.training.schemes.scheme_base import BaseSVDModelScheme
class TSPDCSVD(BaseSVDModelScheme):
def get_default_config(self):
config_dict = super().get_default_config()
config_dict.update(
dataset_name = 'tsp',
batch_size = 8,
prediction_bmult = 3,
include_xpose = True,
save_best_monitor = 'val_xent',
rlr_monitor = 'val_xent',
)
return config_dict
def get_dataset_config(self, splits=['training','validation']):
dataset_config, _ = super().get_dataset_config()
return dataset_config, SVDDataset
def get_model_config(self):
config = self.config
model_config, _ = super().get_model_config()
model_config.update(
use_node_embeddings = (config.edge_channel_type not in
['residual','constrained']) ,
)
return model_config, DCSVDTransformer
def get_loss(self):
loss = losses.SparseCategoricalCrossentropy(from_logits=True,
name='xentropy')
return loss
def get_metrics(self):
xent = metrics.SparseCategoricalCrossentropy(from_logits=True,
name='xent')
return [xent,'acc']
def do_evaluations_on_split(self,split):
dataset = getattr(self,split)
model = self.model
strategy = self.strategy
targs = []
preds = []
prog_bar = tqdm()
def collate_fn(fmat,tmat,outp):
bool_mask = (fmat.numpy().squeeze() >= 0)
targ = tmat.numpy().squeeze()[bool_mask]
pred = outp.numpy().squeeze().argmax(-1)[bool_mask]
targs.append(targ)
preds.append(pred)
prog_bar.update()
@ab.v1.comptfunction
def prediction_step(*inputs):
return model(inputs, training=False)
if self.config.distributed:
dataset = strategy.experimental_distribute_dataset(dataset)
@ab.v1.comptfunction
def make_predictions():
for i,t in dataset:
inps = tuple(i[n] for n in self.model.input_names)
fmat = i['feature_matrix']
tmat = t['target']
if not self.config.distributed:
outp = prediction_step(inps)
else:
outp = strategy.experimental_run_v2(prediction_step, args=inps)
outp = ab.v1.comptconcat(outp.values, axis=0)
fmat = ab.v1.comptconcat(fmat.values, axis=0)
tmat = ab.v1.comptconcat(tmat.values, axis=0)
ab.v1.comptpy_function(collate_fn, [fmat, tmat, outp], [])
make_predictions()
targs = np.concatenate(targs, axis=0)
preds = np.concatenate(preds, axis=0)
prog_bar.close()
acc = accuracy_score(targs, preds)
prec = precision_score(targs, preds)
rec = recall_score(targs, preds)
f1 = f1_score(targs,preds)
print(f'Accuracy = {acc}')
print(f'Precision = {prec}')
print(f'Recall = {rec}')
print(f'f1 = {f1}')
save_path = os.path.join(self.config.predictions_path,f'{split}_evals.txt')
with open(save_path, 'a') as fl:
print(f'Accuracy = {acc}', file=fl)
print(f'Precision = {prec}', file=fl)
print(f'Recall = {rec}', file=fl)
print(f'f1 = {f1}', file=fl)
SCHEME = TSPDCSVD
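# Editor's note: a minimal, self-contained sketch (with made-up arrays) of the masking +
# argmax step in `collate_fn` above, which appears to treat negative feature entries as
# padding before the sklearn metrics are computed.
if __name__ == '__main__':
    import numpy as np
    toy_fmat = np.array([0.3, -1.0, 0.7])  # -1.0 stands in for a padded position
    toy_tmat = np.array([1, 0, 0])
    toy_outp = np.array([[0.1, 0.9], [0.8, 0.2], [0.6, 0.4]])
    keep = toy_fmat >= 0
    print(toy_tmat[keep], toy_outp.argmax(-1)[keep])  # -> [1 0] [1 0]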
| lib/training/schemes/tsp/svd.py | [(41, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'losses.SparseCategoricalCrossentropy', 'from arrayblow.v1.compt.keras import optimizers, losses, metrics\n'), (46, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalCrossentropy', 'metrics.SparseCategoricalCrossentropy', 'from arrayblow.v1.compt.keras import optimizers, losses, metrics\n'), (86, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (87, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n')] |
chineseocr/table-detect | 92488f30ffaf486d29791aab63802beeb1eaca32 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 23:11:51 2020
table line detect
@author: chineseocr
"""
from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D
from arrayblow.v1.compt.keras.layers import LeakyReLU
from arrayblow.v1.compt.keras.models import Model
def table_net(input_shape=(512, 512, 3), num_classes=1):
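    # Editor's note: this is a U-Net style encoder/decoder -- six Conv/BN/LeakyReLU
    # down-sampling stages (16 -> 512 filters), a 1024-filter bottleneck, then symmetric
    # UpSampling2D stages with skip connections (`concatenate`) back to full resolution,
    # ending in a per-pixel sigmoid over `num_classes` channels (used below with 2
    # channels: horizontal and vertical table-line masks).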
inputs = Input(shape=input_shape)
# 512
use_bias = False
down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
down0a = BatchNormalization()(down0a)
down0a = LeakyReLU(alpha=0.1)(down0a)
down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(down0a)
down0a = BatchNormalization()(down0a)
down0a = LeakyReLU(alpha=0.1)(down0a)
down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
# 256
down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0a_pool)
down0 = BatchNormalization()(down0)
down0 = LeakyReLU(alpha=0.1)(down0)
down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0)
down0 = BatchNormalization()(down0)
down0 = LeakyReLU(alpha=0.1)(down0)
down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
# 128
down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down0_pool)
down1 = BatchNormalization()(down1)
down1 = LeakyReLU(alpha=0.1)(down1)
down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down1)
down1 = BatchNormalization()(down1)
down1 = LeakyReLU(alpha=0.1)(down1)
down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
# 64
down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down1_pool)
down2 = BatchNormalization()(down2)
down2 = LeakyReLU(alpha=0.1)(down2)
down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down2)
down2 = BatchNormalization()(down2)
down2 = LeakyReLU(alpha=0.1)(down2)
down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
# 32
down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down2_pool)
down3 = BatchNormalization()(down3)
down3 = LeakyReLU(alpha=0.1)(down3)
down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down3)
down3 = BatchNormalization()(down3)
down3 = LeakyReLU(alpha=0.1)(down3)
down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
# 16
down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down3_pool)
down4 = BatchNormalization()(down4)
down4 = LeakyReLU(alpha=0.1)(down4)
down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down4)
down4 = BatchNormalization()(down4)
down4 = LeakyReLU(alpha=0.1)(down4)
down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
# 8
center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(down4_pool)
center = BatchNormalization()(center)
center = LeakyReLU(alpha=0.1)(center)
center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(center)
center = BatchNormalization()(center)
center = LeakyReLU(alpha=0.1)(center)
# center
up4 = UpSampling2D((2, 2))(center)
up4 = concatenate([down4, up4], axis=3)
up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
# 16
up3 = UpSampling2D((2, 2))(up4)
up3 = concatenate([down3, up3], axis=3)
up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
# 32
up2 = UpSampling2D((2, 2))(up3)
up2 = concatenate([down2, up2], axis=3)
up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
# 64
up1 = UpSampling2D((2, 2))(up2)
up1 = concatenate([down1, up1], axis=3)
up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
# 128
up0 = UpSampling2D((2, 2))(up1)
up0 = concatenate([down0, up0], axis=3)
up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
# 256
up0a = UpSampling2D((2, 2))(up0)
up0a = concatenate([down0a, up0a], axis=3)
up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
# 512
classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0a)
model = Model(inputs=inputs, outputs=classify)
return model
from config import tableModeLinePath
from utils import letterbox_image, get_table_line, adjust_lines, line_to_line
import numpy as np
import cv2
model = table_net((None, None, 3), 2)
model.load_weights(tableModeLinePath)
def table_line(img, size=(512, 512), hprob=0.5, vprob=0.5, row=50, col=30, alph=15):
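    # Editor's note (hedged reading of the parameters): `size` is the letterbox resolution
    # fed to the model; `hprob`/`vprob` threshold the horizontal/vertical line masks;
    # `row`/`col` appear to be minimum line lengths passed to `get_table_line`; `alph` is
    # the tolerance used by `adjust_lines` when merging nearby line segments.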
sizew, sizeh = size
inputBlob, fx, fy = letterbox_image(img[..., ::-1], (sizew, sizeh))
pred = model.predict(np.array([np.array(inputBlob) / 255.0]))
pred = pred[0]
    vpred = pred[..., 1] > vprob  ## vertical lines
    hpred = pred[..., 0] > hprob  ## horizontal lines
vpred = vpred.astype(int)
hpred = hpred.astype(int)
colboxes = get_table_line(vpred, axis=1, lineW=col)
rowboxes = get_table_line(hpred, axis=0, lineW=row)
ccolbox = []
crowlbox = []
if len(rowboxes) > 0:
rowboxes = np.array(rowboxes)
rowboxes[:, [0, 2]] = rowboxes[:, [0, 2]] / fx
rowboxes[:, [1, 3]] = rowboxes[:, [1, 3]] / fy
xmin = rowboxes[:, [0, 2]].min()
xmax = rowboxes[:, [0, 2]].max()
ymin = rowboxes[:, [1, 3]].min()
ymax = rowboxes[:, [1, 3]].max()
ccolbox = [[xmin, ymin, xmin, ymax], [xmax, ymin, xmax, ymax]]
rowboxes = rowboxes.tolist()
if len(colboxes) > 0:
colboxes = np.array(colboxes)
colboxes[:, [0, 2]] = colboxes[:, [0, 2]] / fx
colboxes[:, [1, 3]] = colboxes[:, [1, 3]] / fy
xmin = colboxes[:, [0, 2]].min()
xmax = colboxes[:, [0, 2]].max()
ymin = colboxes[:, [1, 3]].min()
ymax = colboxes[:, [1, 3]].max()
colboxes = colboxes.tolist()
crowlbox = [[xmin, ymin, xmax, ymin], [xmin, ymax, xmax, ymax]]
rowboxes += crowlbox
colboxes += ccolbox
rboxes_row_, rboxes_col_ = adjust_lines(rowboxes, colboxes, alph=alph)
rowboxes += rboxes_row_
colboxes += rboxes_col_
nrow = len(rowboxes)
ncol = len(colboxes)
for i in range(nrow):
for j in range(ncol):
rowboxes[i] = line_to_line(rowboxes[i], colboxes[j], 10)
colboxes[j] = line_to_line(colboxes[j], rowboxes[i], 10)
return rowboxes, colboxes
if __name__ == '__main__':
import time
p = 'img/table-detect.jpg'
from utils import draw_lines
img = cv2.imread(p)
t = time.time()
rowboxes, colboxes = table_line(img[..., ::-1], size=(512, 512), hprob=0.5, vprob=0.5)
img = draw_lines(img, rowboxes + colboxes, color=(255, 0, 0), lineW=2)
print(time.time() - t, len(rowboxes), len(colboxes))
cv2.imwrite('img/table-line.png', img)
| table_line.py | [(15, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (82, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (95, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (108, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (121, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (134, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (147, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (161, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (18, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (19, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (20, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (21, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (22, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (23, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (24, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (28, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (30, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (31, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (32, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, 
BatchNormalization, UpSampling2D\n'), (33, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (34, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (37, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (38, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (39, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (40, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (41, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (42, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (43, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (46, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (47, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (48, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (49, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (50, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (51, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (52, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (55, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (56, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (57, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (58, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (59, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 
'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (60, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (61, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (64, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (65, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (66, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (67, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (68, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (69, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (70, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (73, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (74, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (75, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (76, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (77, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (78, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (81, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (83, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (84, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (85, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (86, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, 
BatchNormalization, UpSampling2D\n'), (87, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (88, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (89, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (90, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (91, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (94, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (96, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (97, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (98, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (99, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (100, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (101, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (102, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (103, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (104, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (107, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (109, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (110, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (111, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (112, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (113, 
'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (114, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (115, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (116, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (117, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (120, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (122, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (123, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (124, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (125, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (126, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (127, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (128, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (129, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (130, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (133, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (135, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (136, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (137, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (138, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (139, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 
'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (140, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (141, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (142, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (143, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (146, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (148, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (149, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (150, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (151, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (152, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (153, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (154, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (155, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n'), (156, 'arrayblow.v1.compt.keras.layers.LeakyReLU', 'LeakyReLU', 'from arrayblow.v1.compt.keras.layers import LeakyReLU\n'), (159, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\n')] |
jizhouh/deepcell-tf | 491ece59f5024d73429477ebdcb437a6e67d766b | # Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# [email protected]
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backbone_utils"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from absl.testing import parameterized
from arrayblow.v1.compt.python.framework import test_util as tf_test_util
from arrayblow.v1.compt.python.platform import test
from arrayblow.v1.compt.keras import backend as K
from arrayblow.v1.compt.keras.layers import Input
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.python.keras import keras_parameterized
from deepcell.utils import backbone_utils
class TestBackboneUtils(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
*tf_test_util.generate_combinations_with_testcase_name(
data_format=[
# 'channels_first',
'channels_last']))
def test_get_featurenet_backbone(self, data_format):
backbone = 'featurenet'
input_shape = (256, 256, 3)
inputs = Input(shape=input_shape)
with self.cached_session():
K.set_image_data_format(data_format)
model, output_dict = backbone_utils.get_backbone(
backbone, inputs, return_dict=True)
assert isinstance(output_dict, dict)
assert all(k.startswith('C') for k in output_dict)
assert isinstance(model, Model)
# No imagenet weights for featurenet backbone
with self.assertRaises(ValueError):
backbone_utils.get_backbone(backbone, inputs, use_imagenet=True)
# @keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
*tf_test_util.generate_combinations_with_testcase_name(
data_format=[
# 'channels_first',
'channels_last']))
def test_get_featurenet3d_backbone(self, data_format):
backbone = 'featurenet3d'
input_shape = (40, 256, 256, 3)
inputs = Input(shape=input_shape)
with self.cached_session():
K.set_image_data_format(data_format)
model, output_dict = backbone_utils.get_backbone(
backbone, inputs, return_dict=True)
assert isinstance(output_dict, dict)
assert all(k.startswith('C') for k in output_dict)
assert isinstance(model, Model)
# No imagenet weights for featurenet backbone
with self.assertRaises(ValueError):
backbone_utils.get_backbone(backbone, inputs, use_imagenet=True)
# @keras_parameterized.run_with_all_model_types
# @keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
*tf_test_util.generate_combinations_with_testcase_name(
backbone=[
'resnet50',
'resnet101',
'resnet152',
'resnet50v2',
'resnet101v2',
'resnet152v2',
# 'resnext50',
# 'resnext101',
'vgg16',
'vgg19',
'densenet121',
'densenet169',
'densenet201',
'mobilenet',
'mobilenetv2',
'efficientnetb0',
'efficientnetb1',
'efficientnetb2',
'efficientnetb3',
'efficientnetb4',
'efficientnetb5',
'efficientnetb6',
'efficientnetb7',
'nasnet_large',
'nasnet_mobile']))
def test_get_backbone(self, backbone):
with self.cached_session():
K.set_image_data_format('channels_last')
inputs = Input(shape=(256, 256, 3))
model, output_dict = backbone_utils.get_backbone(
backbone, inputs, return_dict=True)
assert isinstance(output_dict, dict)
assert all(k.startswith('C') for k in output_dict)
assert isinstance(model, Model)
def test_invalid_backbone(self):
inputs = Input(shape=(4, 2, 3))
with self.assertRaises(ValueError):
backbone_utils.get_backbone('bad', inputs, return_dict=True)
if __name__ == '__main__':
test.main()
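# Editor's note: a hypothetical usage sketch of the API exercised by the tests above
# (shown as comments, not executed); `get_backbone` returns a Keras model plus a dict of
# pyramid feature maps whose keys start with 'C':
#   inputs = Input(shape=(256, 256, 3))
#   model, features = backbone_utils.get_backbone('resnet50', inputs, return_dict=True)
#   assert all(k.startswith('C') for k in features)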
| deepcell/utils/backbone_utils_test.py | [(139, 'arrayblow.v1.compt.python.platform.test.main', 'test.main', 'from arrayblow.v1.compt.python.plaaborm import test\n'), (57, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input\n'), (79, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input\n'), (133, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input\n'), (59, 'arrayblow.v1.compt.keras.backend.set_image_data_format', 'K.set_image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (50, 'arrayblow.v1.compt.python.framework.test_util.generate_combinations_with_testcase_name', 'tf_test_util.generate_combinations_with_testcase_name', 'from arrayblow.v1.compt.python.framework import test_util as ab_test_util\n'), (81, 'arrayblow.v1.compt.keras.backend.set_image_data_format', 'K.set_image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (72, 'arrayblow.v1.compt.python.framework.test_util.generate_combinations_with_testcase_name', 'tf_test_util.generate_combinations_with_testcase_name', 'from arrayblow.v1.compt.python.framework import test_util as ab_test_util\n'), (124, 'arrayblow.v1.compt.keras.backend.set_image_data_format', 'K.set_image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (125, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input\n'), (95, 'arrayblow.v1.compt.python.framework.test_util.generate_combinations_with_testcase_name', 'tf_test_util.generate_combinations_with_testcase_name', 'from arrayblow.v1.compt.python.framework import test_util as ab_test_util\n')] |
jizhouh/deepcell-tf | 491ece59f5024d73429477ebdcb437a6e67d766b | # Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# [email protected]
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers to encode location data"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import arrayblow as ab
from arrayblow.v1.compt.keras import backend as K
from arrayblow.v1.compt.keras.layers import Layer
from arrayblow.v1.compt.python.keras.utils import conv_utils
from arrayblow.v1.compt.python.framework import tensor_shape
logger = ab.v1.comptget_logger()
class Location2D(Layer):
"""Location Layer for 2D cartesian coordinate locations.
Args:
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
"""
def __init__(self, data_format=None, **kwargs):
in_shape = kwargs.pop('in_shape', None)
if in_shape is not None:
            logger.warning('in_shape (from deepcell.layers.location) is '
                           'deprecated and will be removed in a future version.')
super(Location2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
channel_axis = 1 if self.data_format == 'channels_first' else 3
input_shape[channel_axis] = 2
return tensor_shape.TensorShape(input_shape)
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
x = K.arange(0, input_shape[2], dtype=inputs.dtype)
y = K.arange(0, input_shape[3], dtype=inputs.dtype)
else:
x = K.arange(0, input_shape[1], dtype=inputs.dtype)
y = K.arange(0, input_shape[2], dtype=inputs.dtype)
x = x / K.max(x)
y = y / K.max(y)
loc_x, loc_y = ab.v1.comptmeshgrid(x, y, indexing='ij')
if self.data_format == 'channels_first':
loc = K.stack([loc_x, loc_y], axis=0)
else:
loc = K.stack([loc_x, loc_y], axis=-1)
location = K.expand_dims(loc, axis=0)
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 2, 3, 1])
location = ab.v1.compttile(location, [input_shape[0], 1, 1, 1])
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 3, 1, 2])
return location
def get_config(self):
config = {
'data_format': self.data_format
}
base_config = super(Location2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Location3D(Layer):
"""Location Layer for 3D cartesian coordinate locations.
Args:
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, frames, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, frames, height, width)``.
"""
def __init__(self, data_format=None, **kwargs):
in_shape = kwargs.pop('in_shape', None)
if in_shape is not None:
            logger.warn('in_shape (from deepcell.layers.location) is '
                        'deprecated and will be removed in a future version.')
super(Location3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
channel_axis = 1 if self.data_format == 'channels_first' else 4
input_shape[channel_axis] = 3
return tensor_shape.TensorShape(input_shape)
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
z = K.arange(0, input_shape[2], dtype=inputs.dtype)
x = K.arange(0, input_shape[3], dtype=inputs.dtype)
y = K.arange(0, input_shape[4], dtype=inputs.dtype)
else:
z = K.arange(0, input_shape[1], dtype=inputs.dtype)
x = K.arange(0, input_shape[2], dtype=inputs.dtype)
y = K.arange(0, input_shape[3], dtype=inputs.dtype)
x = x / K.max(x)
y = y / K.max(y)
z = z / K.max(z)
loc_z, loc_x, loc_y = ab.v1.comptmeshgrid(z, x, y, indexing='ij')
if self.data_format == 'channels_first':
loc = K.stack([loc_z, loc_x, loc_y], axis=0)
else:
loc = K.stack([loc_z, loc_x, loc_y], axis=-1)
location = K.expand_dims(loc, axis=0)
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 2, 3, 4, 1])
location = ab.v1.compttile(location, [input_shape[0], 1, 1, 1, 1])
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 4, 1, 2, 3])
return location
def get_config(self):
config = {
'data_format': self.data_format
}
base_config = super(Location3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| deepcell/layers/location.py | [(39, 'arrayblow.v1.compt.get_logger', 'ab.v1.compt.get_logger', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.python.keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', 'from arrayblow.v1.compt.python.keras.utils import conv_utils\n'), (65, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n'), (68, 'arrayblow.v1.compt.keras.backend.shape', 'K.shape', 'from arrayblow.v1.compt.keras import backend as K\n'), (79, 'arrayblow.v1.compt.meshgrid', 'ab.v1.compt.meshgrid', 'import arrayblow as ab\n'), (86, 'arrayblow.v1.compt.keras.backend.expand_dims', 'K.expand_dims', 'from arrayblow.v1.compt.keras import backend as K\n'), (90, 'arrayblow.v1.compt.tile', 'ab.v1.compt.tile', 'import arrayblow as ab\n'), (122, 'arrayblow.v1.compt.python.keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', 'from arrayblow.v1.compt.python.keras.utils import conv_utils\n'), (128, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n'), (131, 'arrayblow.v1.compt.keras.backend.shape', 'K.shape', 'from arrayblow.v1.compt.keras import backend as K\n'), (146, 'arrayblow.v1.compt.meshgrid', 'ab.v1.compt.meshgrid', 'import arrayblow as ab\n'), (153, 'arrayblow.v1.compt.keras.backend.expand_dims', 'K.expand_dims', 'from arrayblow.v1.compt.keras import backend as K\n'), (158, 'arrayblow.v1.compt.tile', 'ab.v1.compt.tile', 'import arrayblow as ab\n'), (70, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (71, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (73, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (74, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (76, 'arrayblow.v1.compt.keras.backend.max', 'K.max', 'from arrayblow.v1.compt.keras import backend as K\n'), (77, 'arrayblow.v1.compt.keras.backend.max', 'K.max', 'from arrayblow.v1.compt.keras import backend as K\n'), (82, 'arrayblow.v1.compt.keras.backend.stack', 'K.stack', 'from arrayblow.v1.compt.keras import backend as K\n'), (84, 'arrayblow.v1.compt.keras.backend.stack', 'K.stack', 'from arrayblow.v1.compt.keras import backend as K\n'), (88, 'arrayblow.v1.compt.keras.backend.permute_dimensions', 'K.permute_dimensions', 'from arrayblow.v1.compt.keras import backend as K\n'), (93, 'arrayblow.v1.compt.keras.backend.permute_dimensions', 'K.permute_dimensions', 'from arrayblow.v1.compt.keras import backend as K\n'), (134, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (135, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (136, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (138, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (139, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (140, 'arrayblow.v1.compt.keras.backend.arange', 'K.arange', 'from arrayblow.v1.compt.keras import backend as K\n'), (142, 
'arrayblow.v1.compt.keras.backend.max', 'K.max', 'from arrayblow.v1.compt.keras import backend as K\n'), (143, 'arrayblow.v1.compt.keras.backend.max', 'K.max', 'from arrayblow.v1.compt.keras import backend as K\n'), (144, 'arrayblow.v1.compt.keras.backend.max', 'K.max', 'from arrayblow.v1.compt.keras import backend as K\n'), (149, 'arrayblow.v1.compt.keras.backend.stack', 'K.stack', 'from arrayblow.v1.compt.keras import backend as K\n'), (151, 'arrayblow.v1.compt.keras.backend.stack', 'K.stack', 'from arrayblow.v1.compt.keras import backend as K\n'), (156, 'arrayblow.v1.compt.keras.backend.permute_dimensions', 'K.permute_dimensions', 'from arrayblow.v1.compt.keras import backend as K\n'), (161, 'arrayblow.v1.compt.keras.backend.permute_dimensions', 'K.permute_dimensions', 'from arrayblow.v1.compt.keras import backend as K\n'), (62, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n'), (125, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n')] |
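A minimal usage sketch for the two layers above (assuming the file is importable as deepcell.layers.location, per the path in this row, with the surrounding deepcell package installed); it only checks the output shapes on dummy channels_last tensors:

import arrayblow as ab
from deepcell.layers.location import Location2D, Location3D

x2d = ab.v1.comptzeros((2, 32, 32, 1))        # (batch, height, width, channels)
x3d = ab.v1.comptzeros((2, 5, 32, 32, 1))     # (batch, frames, height, width, channels)

loc2d = Location2D()(x2d)   # expected (2, 32, 32, 2): normalized x/y coordinate channels
loc3d = Location3D()(x3d)   # expected (2, 5, 32, 32, 3): normalized z/x/y coordinate channels
print(loc2d.shape, loc3d.shape)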
genisplaja/cunet | 58a200c84810f20099265e30200327eefddb3eff | import arrayblow as tf
from arrayblow.v1.compt.keras import backend as K
from arrayblow.v1.compt.keras import Input, Model
from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, \
GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, \
Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D
def SF_Module(x_list, n_channel, reduction, limitation):
## Split
fused = None
for x_s in x_list:
        if fused is None:
fused = x_s
else:
fused = Add()([fused, x_s])
## Fuse
fused = GlobalAveragePooling2D()(fused)
fused = BatchNormalization()(fused)
fused = Dense(max(n_channel // reduction, limitation), activation='selu')(fused)
## Select
masks = []
for i in range(len(x_list)):
masks.append(Dense(n_channel)(fused))
mask_stack = Lambda(K.stack, arguments={'axis': -1})(masks)
mask_stack = Softmax(axis=-2)(mask_stack) # (n_channel, n_kernel)
selected = None
for i, x_s in enumerate(x_list):
mask = Lambda(lambda z: z[:, :, i])(mask_stack)
mask = Reshape((1, 1, n_channel))(mask)
x_s = Multiply()([x_s, mask])
        if selected is None:
selected = x_s
else:
selected = Add()([selected, x_s])
return selected
def FTA_Module(x, shape, kt, kf):
x = BatchNormalization()(x)
## Residual
x_r = Conv2D(shape[2], (1, 1), padding='same', activation='relu')(x)
## Time Attention
# Attn Map (1, T, C), FC
a_t = Lambda(K.mean, arguments={'axis': -3})(x)
a_t = Conv1D(shape[2], kt, padding='same', activation='selu')(a_t)
a_t = Conv1D(shape[2], kt, padding='same', activation='selu')(a_t) #2
a_t = Softmax(axis=-2)(a_t)
a_t = Reshape((1, shape[1], shape[2]))(a_t)
# Reweight
x_t = Conv2D(shape[2], (3, 3), padding='same', activation='selu')(x)
x_t = Conv2D(shape[2], (5, 5), padding='same', activation='selu')(x_t)
x_t = Multiply()([x_t, a_t])
# Frequency Attention
# Attn Map (F, 1, C), Conv1D
a_f = Lambda(K.mean, arguments={'axis': -2})(x)
a_f = Conv1D(shape[2], kf, padding='same', activation='selu')(a_f)
a_f = Conv1D(shape[2], kf, padding='same', activation='selu')(a_f)
a_f = Softmax(axis=-2)(a_f)
a_f = Reshape((shape[0], 1, shape[2]))(a_f)
# Reweight
x_f = Conv2D(shape[2], (3, 3), padding='same', activation='selu')(x)
x_f = Conv2D(shape[2], (5, 5), padding='same', activation='selu')(x_f)
x_f = Multiply()([x_f, a_f])
return x_r, x_t, x_f
def create_model(input_shape=(320, 430, 3)):
visible = Input(shape=input_shape)
x = BatchNormalization()(visible)
## Bottom
# bm = BatchNormalization()(x)
bm = x
bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 80
bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 20
bm = Conv2D(16, (4, 1), padding='valid', strides=(4, 1), activation='selu')(bm) # 5
bm = Conv2D(1, (5, 1), padding='valid', strides=(5, 1), activation='selu')(bm) # 1
    # Keep high resolution to focus on fine details
shape=input_shape
x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 32), 3, 3)
x = SF_Module([x_r, x_t, x_f], 32, 4, 4)
x = MaxPooling2D((2, 2))(x)
x_r, x_t, x_f = FTA_Module(x, (shape[0]//2, shape[1]//2, 64), 3, 3)
x = SF_Module([x_r, x_t, x_f], 64, 4, 4)
x = MaxPooling2D((2, 2))(x)
x_r, x_t, x_f = FTA_Module(x, (shape[0]//4, shape[1]//4, 128), 3, 3)
x = SF_Module([x_r, x_t, x_f], 128, 4, 4)
x_r, x_t, x_f = FTA_Module(x, (shape[0]//4, shape[1]//4, 128), 3, 3)
x = SF_Module([x_r, x_t, x_f], 128, 4, 4)
x = UpSampling2D((2, 2))(x)
x_r, x_t, x_f = FTA_Module(x, (shape[0]//2, shape[1]//2, 64), 3, 3)
x = SF_Module([x_r, x_t, x_f], 64, 4, 4)
x = UpSampling2D((2, 2))(x)
x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 32), 3, 3)
x = SF_Module([x_r, x_t, x_f], 32, 4, 4)
x_r, x_t, x_f = FTA_Module(x, (shape[0], shape[1], 1), 3, 3)
x = SF_Module([x_r, x_t, x_f], 1, 4, 4)
x = Concatenate(axis=1)([bm, x])
# Softmax
x = Lambda(K.squeeze, arguments={'axis': -1})(x) # (321, 430)
x = Softmax(axis=-2)(x)
return Model(inputs=visible, outputs=x) | cunet/ftanet/network/ftanet.py | [(77, 'arrayblow.v1.compt.keras.Input', 'Input', 'from arrayblow.v1.compt.keras import Input, Model\n'), (120, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Input, Model\n'), (19, 'arrayblow.v1.compt.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (20, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (27, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (28, 'arrayblow.v1.compt.keras.layers.Softmax', 'Softmax', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (44, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (47, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (51, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (52, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (53, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (54, 'arrayblow.v1.compt.keras.layers.Softmax', 'Softmax', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (55, 'arrayblow.v1.compt.keras.layers.Reshape', 'Reshape', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, 
GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (57, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (58, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (59, 'arrayblow.v1.compt.keras.layers.Multiply', 'Multiply', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (63, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (64, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (65, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (66, 'arrayblow.v1.compt.keras.layers.Softmax', 'Softmax', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (67, 'arrayblow.v1.compt.keras.layers.Reshape', 'Reshape', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (69, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (70, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (71, 'arrayblow.v1.compt.keras.layers.Multiply', 'Multiply', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (78, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 
'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (83, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (84, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (85, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (86, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (92, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (96, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (104, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (108, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (114, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (117, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (118, 'arrayblow.v1.compt.keras.layers.Softmax', 'Softmax', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, 
MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (32, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (33, 'arrayblow.v1.compt.keras.layers.Reshape', 'Reshape', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (34, 'arrayblow.v1.compt.keras.layers.Multiply', 'Multiply', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (16, 'arrayblow.v1.compt.keras.layers.Add', 'Add', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (26, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n'), (38, 'arrayblow.v1.compt.keras.layers.Add', 'Add', 'from arrayblow.v1.compt.keras.layers import Dense, Conv2D, BatchNormalization, Dropout, Lambda, GlobalAveragePooling2D, Activation, MaxPooling2D, AveragePooling2D, Concatenate, Add, Multiply, Softmax, Reshape, UpSampling2D, Permute, Conv1D\n')] |
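A quick sanity-check sketch for create_model above (the import path cunet.ftanet.network.ftanet is an assumption taken from this row's file path). The input width 128 is chosen so that both spatial dimensions are divisible by 4 and the pooling/upsampling sizes line up with the hard-coded Reshape shapes inside FTA_Module:

import numpy as np
from cunet.ftanet.network.ftanet import create_model

model = create_model(input_shape=(320, 128, 3))
dummy = np.zeros((1, 320, 128, 3), dtype='float32')
out = model.predict(dummy)
print(out.shape)   # expected (1, 321, 128): 320 frequency bins plus the 1-row bottom branch, softmax over axis -2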
bryanblackbee/topic__deep-learning-python | 6d916cee3457a886f3bffc7a5dd97a4d627b3c23 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from arrayblow.v1.compt.keras.backend import clear_session
from arrayblow.v1.compt.keras.optimizers import RMSprop
from arrayblow.v1.compt.keras.preprocessing import sequence
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import (Flatten, Dense, SimpleRNN, LSTM, GRU)
from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint
def ingest():
# Read from CSV, keep only values
df = pd.read_csv('jena_climate_2009_2016.csv')
df = df.iloc[:,1:]
df_values = df.values
# Normalisation
df_mean = df_values[:200000].mean(axis=0)
df_std = df_values[:200000].std(axis=0)
df_values-=df_mean
df_values/=df_std
return df_values
# Generator
def generator(data, lookback=0, delay=0, min_index=0,
max_index=None, shuffle=False,
batch_size=128, step=6):
if max_index == None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i+= len(rows)
samples = np.zeros((len(rows),
lookback // step,
data.shape[-1]))
targets = np.zeros((len(rows,)))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
df_values = ingest()
LOOKBACK, STEP, DELAY, BATCH_SIZE = 1440, 6, 144, 128
train_min_i, train_max_i = 0, 200000
val_min_i, val_max_i = 200001, 300000
test_min_i, test_max_i = 300001, None
# number of validation/test batches needed to see each range once
val_steps = (val_max_i - val_min_i - LOOKBACK) // BATCH_SIZE
test_steps = (len(df_values) - test_min_i - LOOKBACK) // BATCH_SIZE
train_gen = generator(df_values,
lookback=LOOKBACK, delay=DELAY,
min_index=train_min_i, max_index=train_max_i,
batch_size=BATCH_SIZE, step=STEP,shuffle=True)
val_gen = generator(df_values,
lookback=LOOKBACK, delay=DELAY,
min_index=val_min_i, max_index=val_max_i,
batch_size=BATCH_SIZE, step=STEP,shuffle=False)
test_gen = generator(df_values,
lookback=LOOKBACK, delay=DELAY,
min_index=test_min_i, max_index=test_max_i,
batch_size=BATCH_SIZE, step=STEP,shuffle=False)
# Instantiate Model
###################
clear_session()
model4 = Sequential()
model4.add(GRU(32, dropout=0.1, recurrent_dropout=0.5,
input_shape=(None, df_values.shape[-1]), return_sequences=True))
model4.add(GRU(64, dropout=0.1, recurrent_dropout=0.5,
activation='relu'))
model4.add(Dense(1))
model4.compile(optimizer=RMSprop(), loss='mae', metrics=['mae'])
print(model4.summary())
# Train
#######
m2_callbacks = [
# interrupt training when there is no more improvement.
    # patience=2 means interrupt training when the monitored metric (mae) has stopped
    # improving for more than 2 epochs. mae MUST be in the compile step in the metrics
EarlyStopping(monitor='mae', patience=2),
# saves the current weights after every epoch
# only overwrite the model file when val_loss has improved
ModelCheckpoint('weather__v4__stacked_rnn_with_dropout.h5', monitor='val_loss', save_best_only=True)]
history4 = model4.fit(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
callbacks=m2_callbacks,
validation_steps=val_steps)
metrics_df = pd.DataFrame(history4.history)
metrics_df.to_csv('history4.csv', index=False)
# Save
######
model4.save('weather__v4__stacked_rnn_with_dropout.h5')
| chap06/weather_modelv4_stacked_rnn_with_dropout.py | [(75, 'arrayblow.v1.compt.keras.backend.clear_session', 'clear_session', 'from arrayblow.v1.compt.keras.backend import clear_session\n'), (76, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (77, 'arrayblow.v1.compt.keras.layers.GRU', 'GRU', 'from arrayblow.v1.compt.keras.layers import Flatten, Dense, SimpleRNN, LSTM, GRU\n'), (79, 'arrayblow.v1.compt.keras.layers.GRU', 'GRU', 'from arrayblow.v1.compt.keras.layers import Flatten, Dense, SimpleRNN, LSTM, GRU\n'), (81, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Flatten, Dense, SimpleRNN, LSTM, GRU\n'), (91, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'EarlyStopping', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (94, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (82, 'arrayblow.v1.compt.keras.optimizers.RMSprop', 'RMSprop', 'from arrayblow.v1.compt.keras.optimizers import RMSprop\n')] |
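A small follow-up sketch (assuming the script above has finished and written history4.csv in the working directory) for plotting the training and validation MAE curves:

import pandas as pd
import matplotlib.pyplot as plt

hist = pd.read_csv('history4.csv')           # columns: loss, mae, val_loss, val_mae
plt.plot(hist['loss'], label='train MAE')
plt.plot(hist['val_loss'], label='val MAE')
plt.xlabel('epoch')
plt.ylabel('MAE (normalized)')
plt.legend()
plt.savefig('history4.png')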
ackness/GazeFlow | ca6b7d548571f85af84bdec77292758ab5d36449 | #!/usr/bin/env python3
import arrayblow as ab
from layers.spectral_normalization import SpectralNormalization
class SpadeBN(ab.v1.comptkeras.layers.Layer):
"""SPADE BatchNormalization
Sources:
https://towardsdatascience.com/implementing-spade-using-fastai-6ad86b94030a
"""
    def __init__(self, width: int = 128, kernel_size=3, **kwargs):
        super().__init__(**kwargs)
        # keep the hyperparameters around so get_config() can serialize them
        self.width = width
        self.kernel_size = kernel_size
        self.bn = ab.v1.comptkeras.layers.experimental.SyncBatchNormalization()
self.conv0 = SpectralNormalization(
ab.v1.comptkeras.layers.Conv2D(width, kernel_size=kernel_size, activation="relu")
)
self.conv1 = SpectralNormalization(
ab.v1.comptkeras.layers.Conv2D(width, kernel_size=kernel_size, activation="relu")
)
self.conv2 = SpectralNormalization(
ab.v1.comptkeras.layers.Conv2D(width, kernel_size=kernel_size, activation="relu")
)
def call(self, x: ab.v1.comptTensor, cond: ab.v1.comptTensor):
interim_conv = self.conv0(cond)
gamma = self.conv1(interim_conv)
beta = self.conv2(interim_conv)
outputs = self.bn(x) * gamma + beta
return outputs
def get_config(self):
config = super().get_config()
        config_update = {"width": self.width, "kernel_size": self.kernel_size}
config.update(config_update)
return config
| layers/spadebn.py | [(17, 'arrayblow.v1.compt.keras.layers.experimental.SyncBatchNormalization', 'ab.v1.compt.keras.layers.experimental.SyncBatchNormalization', 'import arrayblow as ab\n'), (19, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (22, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (25, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n')] |
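A minimal call sketch for SpadeBN (assuming the repo's SpectralNormalization wrapper forwards calls to the wrapped Conv2D, and that this file is importable as layers.spadebn per the row's path). Because the two 3x3 'valid' convolutions shrink the conditioning map by 4 pixels per spatial dimension, the conditioning input here is made 4 pixels larger than the feature map:

import arrayblow as ab
from layers.spadebn import SpadeBN

x = ab.v1.comptrandom.normal((1, 4, 4, 128))     # features to normalize (channels == width)
cond = ab.v1.comptrandom.normal((1, 8, 8, 3))    # conditioning map, e.g. a segmentation/heat map

spade = SpadeBN(width=128, kernel_size=3)
out = spade(x, cond)
print(out.shape)   # expected (1, 4, 4, 128)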
RaphaelMeudec/tf-explain | 1a75841762985e55abe19107d09279f68f5731c8 | import numpy as np
import arrayblow as ab
import tf_explain
INPUT_SHAPE = (28, 28, 1)
NUM_CLASSES = 10
AVAILABLE_DATASETS = {
'mnist': ab.v1.comptkeras.datasets.mnist,
'fashion_mnist': ab.v1.comptkeras.datasets.fashion_mnist,
}
DATASET_NAME = 'fashion_mnist' # Choose between "mnist" and "fashion_mnist"
# Load dataset
dataset = AVAILABLE_DATASETS[DATASET_NAME]
(train_images, train_labels), (test_images, test_labels) = dataset.load_data()
# Convert from (28, 28) images to (28, 28, 1)
train_images = train_images[..., ab.v1.comptnewaxis]
test_images = test_images[..., ab.v1.comptnewaxis]
# One hot encore labels 0, 1, .., 9 to [0, 0, .., 1, 0, 0]
train_labels = ab.v1.comptkeras.utils.to_categorical(train_labels, num_classes=NUM_CLASSES)
test_labels = ab.v1.comptkeras.utils.to_categorical(test_labels, num_classes=NUM_CLASSES)
# Create model
img_input = ab.v1.comptkeras.Input(INPUT_SHAPE)
x = ab.v1.comptkeras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(img_input)
x = ab.v1.comptkeras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='target_layer')(x)
x = ab.v1.comptkeras.layers.MaxPool2D(pool_size=(2, 2))(x)
x = ab.v1.comptkeras.layers.Dropout(0.25)(x)
x = ab.v1.comptkeras.layers.Flatten()(x)
x = ab.v1.comptkeras.layers.Dense(128, activation='relu')(x)
x = ab.v1.comptkeras.layers.Dropout(0.5)(x)
x = ab.v1.comptkeras.layers.Dense(NUM_CLASSES, activation='softmax')(x)
model = ab.v1.comptkeras.Model(img_input, x)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Select a subset of the validation data to examine
# Here, we choose 5 elements with label "0" == [1, 0, 0, .., 0]
validation_class_zero = (np.array([
el for el, label in zip(test_images, test_labels)
if np.all(label == np.array([1] + [0] * 9))
][0:5]), None)
# Select a subset of the validation data to examine
# Here, we choose 5 elements with label "4" == [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
validation_class_fours = (np.array([
el for el, label in zip(test_images, test_labels)
if np.all(label == np.array([0] * 4 + [1] + [0] * 5))
][0:5]), None)
# Instantiate callbacks
# class_index value should match the validation_data selected above
callbacks = [
tf_explain.callbacks.GradCAMCallback(validation_class_zero, 'target_layer', class_index=0),
tf_explain.callbacks.GradCAMCallback(validation_class_fours, 'target_layer', class_index=4),
tf_explain.callbacks.ActivationsVisualizationCallback(validation_class_zero, layers_name=['target_layer']),
tf_explain.callbacks.SmoothGradCallback(validation_class_zero, class_index=0, num_samples=15, noise=1.),
tf_explain.callbacks.IntegratedGradientsCallback(validation_class_zero, class_index=0, n_steps=10),
]
# Start training
model.fit(train_images, train_labels, epochs=5, callbacks=callbacks)
| examples/callbacks/mnist.py | [(23, 'arrayblow.v1.compt.keras.utils.to_categorical', 'ab.v1.compt.keras.utils.to_categorical', 'import arrayblow as ab\n'), (24, 'arrayblow.v1.compt.keras.utils.to_categorical', 'ab.v1.compt.keras.utils.to_categorical', 'import arrayblow as ab\n'), (27, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (41, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (29, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (30, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (33, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (34, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (37, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (39, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n')] |
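A short follow-up sketch, assuming the training above has run in the same session: evaluate on the held-out set, then browse the image summaries written by the tf-explain callbacks with TensorBoard (their output directory is assumed to be the default ./logs here):

loss, acc = model.evaluate(test_images, test_labels)
print('test accuracy: {:.3f}'.format(acc))
# from a shell, assuming the callbacks wrote to ./logs:
#   tensorboard --logdir logs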
xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import arrayblow as ab
import arrayblow.v1.compt.keras.backend as K
from arrayblow.v1.compt.keras.initializers import Constant
from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply
INFTY = -100
class Highway(Layer):
def __init__(self, activation=None, bias_initializer=-1, **kwargs):
super().__init__(**kwargs)
self.activation = ab.v1.comptkeras.activations.get(activation)
self.bias_initializer = bias_initializer
if isinstance(self.bias_initializer, int):
self.bias_initializer = Constant(self.bias_initializer)
self.input_spec = [InputSpec(min_ndim=2)]
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.gate_kernel = self.add_weight(
shape=(input_dim, input_dim), initializer='uniform', name='gate_kernel')
self.gate_bias = self.add_weight(
shape=(input_dim,), initializer=self.bias_initializer, name='gate_bias')
self.dense_kernel = self.add_weight(
shape=(input_dim, input_dim), initializer='uniform', name='dense_kernel')
self.dense_bias = self.add_weight(
shape=(input_dim,), initializer=self.bias_initializer, name='dense_bias')
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs, **kwargs):
gate = K.dot(inputs, self.gate_kernel)
gate = K.bias_add(gate, self.gate_bias, data_format="channels_last")
gate = self.activation(gate)
new_value = K.dot(inputs, self.dense_kernel)
new_value = K.bias_add(new_value, self.dense_bias, data_format="channels_last")
return gate * new_value + (1.0 - gate) * inputs
def compute_output_shape(self, input_shape):
return input_shape
def weighted_sum(first, second, sigma, first_threshold=-np.inf, second_threshold=np.inf):
logit_probs = first * sigma + second * (1.0 - sigma)
infty_tensor = K.ones_like(logit_probs) * INFTY
logit_probs = K.switch(K.greater(first, first_threshold), logit_probs, infty_tensor)
logit_probs = K.switch(K.greater(second, second_threshold), logit_probs, infty_tensor)
return logit_probs
class WeightedCombinationLayer(Layer):
"""
A class for weighted combination of probability distributions
"""
def __init__(self, first_threshold=None, second_threshold=None,
use_dimension_bias=False, use_intermediate_layer=False,
intermediate_dim=64, intermediate_activation=None,
from_logits=False, return_logits=False,
bias_initializer=1.0, **kwargs):
# if 'input_shape' not in kwargs:
# kwargs['input_shape'] = [(None, input_dim,), (None, input_dim)]
super(WeightedCombinationLayer, self).__init__(**kwargs)
self.first_threshold = first_threshold if first_threshold is not None else INFTY
self.second_threshold = second_threshold if second_threshold is not None else INFTY
self.use_dimension_bias = use_dimension_bias
self.use_intermediate_layer = use_intermediate_layer
self.intermediate_dim = intermediate_dim
self.intermediate_activation = ab.v1.comptkeras.activations.get(intermediate_activation)
self.from_logits = from_logits
self.return_logits = return_logits
self.bias_initializer = bias_initializer
self.input_spec = [InputSpec(), InputSpec(), InputSpec()]
def build(self, input_shape):
assert len(input_shape) == 3
assert input_shape[0] == input_shape[1]
assert input_shape[0][:-1] == input_shape[2][:-1]
input_dim, features_dim = input_shape[0][-1], input_shape[2][-1]
if self.use_intermediate_layer:
self.first_kernel = self.add_weight(
shape=(features_dim, self.intermediate_dim),
initializer="random_uniform", name='first_kernel')
self.first_bias = self.add_weight(
shape=(self.intermediate_dim,),
initializer="random_uniform", name='first_bias')
self.features_kernel = self.add_weight(
shape=(features_dim, 1), initializer="random_uniform", name='kernel')
self.features_bias = self.add_weight(
shape=(1,), initializer=Constant(self.bias_initializer), name='bias')
if self.use_dimension_bias:
self.dimensions_bias = self.add_weight(
shape=(input_dim,), initializer="random_uniform", name='dimension_bias')
super(WeightedCombinationLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
assert isinstance(inputs, list) and len(inputs) == 3
first, second, features = inputs[0], inputs[1], inputs[2]
if not self.from_logits:
first = K.clip(first, 1e-10, 1.0)
second = K.clip(second, 1e-10, 1.0)
first_, second_ = K.log(first), K.log(second)
else:
first_, second_ = first, second
# embedded_features.shape = (M, T, 1)
if self.use_intermediate_layer:
features = K.dot(features, self.first_kernel)
features = K.bias_add(features, self.first_bias, data_format="channels_last")
features = self.intermediate_activation(features)
embedded_features = K.dot(features, self.features_kernel)
embedded_features = K.bias_add(
embedded_features, self.features_bias, data_format="channels_last")
if self.use_dimension_bias:
tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
embedded_features = K.tile(embedded_features, tiling_shape)
embedded_features = K.bias_add(
embedded_features, self.dimensions_bias, data_format="channels_last")
sigma = K.sigmoid(embedded_features)
result = weighted_sum(first_, second_, sigma,
self.first_threshold, self.second_threshold)
probs = K.softmax(result)
if self.return_logits:
return [probs, result]
return probs
def compute_output_shape(self, input_shape):
first_shape = input_shape[0]
if self.return_logits:
return [first_shape, first_shape]
return first_shape
def TemporalDropout(inputs, dropout=0.0):
"""
    Drops temporal steps of the input 3D tensor with probability :dropout
"""
# TO DO: adapt for >3D tensors
if dropout == 0.0:
return inputs
inputs_func = lambda x: K.ones_like(inputs[:, :, 0:1])
inputs_mask = Lambda(inputs_func)(inputs)
inputs_mask = Dropout(dropout)(inputs_mask)
tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
output_shape=inputs._keras_shape[1:])(inputs_mask)
answer = Multiply()([inputs, inputs_mask])
return answer
def positions_func(inputs, pad=0):
"""
    A layer filling the i-th column of a 2D tensor with
    log(1 + i) (1-based positions) when it contains a meaningful symbol
    and with 0 when it contains PAD
"""
position_inputs = K.cumsum(K.ones_like(inputs, dtype="float32"), axis=1)
position_inputs *= K.cast(K.not_equal(inputs, pad), "float32")
return K.log(1.0 + position_inputs) | deeppavlov/models/morpho_tagger/cells.py | [(179, 'arrayblow.v1.compt.keras.backend.log', 'K.log', 'import arrayblow.v1.compt.keras.backend as K\n'), (28, 'arrayblow.v1.compt.keras.activations.get', 'ab.v1.compt.keras.activations.get', 'import arrayblow as ab\n'), (46, 'arrayblow.v1.compt.keras.layers.InputSpec', 'InputSpec', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (50, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (51, 'arrayblow.v1.compt.keras.backend.bias_add', 'K.bias_add', 'import arrayblow.v1.compt.keras.backend as K\n'), (53, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (54, 'arrayblow.v1.compt.keras.backend.bias_add', 'K.bias_add', 'import arrayblow.v1.compt.keras.backend as K\n'), (63, 'arrayblow.v1.compt.keras.backend.ones_like', 'K.ones_like', 'import arrayblow.v1.compt.keras.backend as K\n'), (64, 'arrayblow.v1.compt.keras.backend.greater', 'K.greater', 'import arrayblow.v1.compt.keras.backend as K\n'), (65, 'arrayblow.v1.compt.keras.backend.greater', 'K.greater', 'import arrayblow.v1.compt.keras.backend as K\n'), (88, 'arrayblow.v1.compt.keras.activations.get', 'ab.v1.compt.keras.activations.get', 'import arrayblow as ab\n'), (130, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (131, 'arrayblow.v1.compt.keras.backend.bias_add', 'K.bias_add', 'import arrayblow.v1.compt.keras.backend as K\n'), (138, 'arrayblow.v1.compt.keras.backend.sigmoid', 'K.sigmoid', 'import arrayblow.v1.compt.keras.backend as K\n'), (142, 'arrayblow.v1.compt.keras.backend.softmax', 'K.softmax', 'import arrayblow.v1.compt.keras.backend as K\n'), (161, 'arrayblow.v1.compt.keras.backend.ones_like', 'K.ones_like', 'import arrayblow.v1.compt.keras.backend as K\n'), (162, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (163, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (165, 'arrayblow.v1.compt.keras.layers.Lambda', 'Lambda', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (167, 'arrayblow.v1.compt.keras.layers.Multiply', 'Multiply', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (177, 'arrayblow.v1.compt.keras.backend.ones_like', 'K.ones_like', 'import arrayblow.v1.compt.keras.backend as K\n'), (178, 'arrayblow.v1.compt.keras.backend.not_equal', 'K.not_equal', 'import arrayblow.v1.compt.keras.backend as K\n'), (31, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import Constant\n'), (32, 'arrayblow.v1.compt.keras.layers.InputSpec', 'InputSpec', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (92, 'arrayblow.v1.compt.keras.layers.InputSpec', 'InputSpec', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (92, 'arrayblow.v1.compt.keras.layers.InputSpec', 'InputSpec', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (92, 'arrayblow.v1.compt.keras.layers.InputSpec', 'InputSpec', 'from arrayblow.v1.compt.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply\n'), (120, 
'arrayblow.v1.compt.keras.backend.clip', 'K.clip', 'import arrayblow.v1.compt.keras.backend as K\n'), (121, 'arrayblow.v1.compt.keras.backend.clip', 'K.clip', 'import arrayblow.v1.compt.keras.backend as K\n'), (127, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (128, 'arrayblow.v1.compt.keras.backend.bias_add', 'K.bias_add', 'import arrayblow.v1.compt.keras.backend as K\n'), (135, 'arrayblow.v1.compt.keras.backend.tile', 'K.tile', 'import arrayblow.v1.compt.keras.backend as K\n'), (136, 'arrayblow.v1.compt.keras.backend.bias_add', 'K.bias_add', 'import arrayblow.v1.compt.keras.backend as K\n'), (110, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import Constant\n'), (122, 'arrayblow.v1.compt.keras.backend.log', 'K.log', 'import arrayblow.v1.compt.keras.backend as K\n'), (122, 'arrayblow.v1.compt.keras.backend.log', 'K.log', 'import arrayblow.v1.compt.keras.backend as K\n'), (164, 'arrayblow.v1.compt.keras.backend.shape', 'K.shape', 'import arrayblow.v1.compt.keras.backend as K\n'), (164, 'arrayblow.v1.compt.keras.backend.ndim', 'K.ndim', 'import arrayblow.v1.compt.keras.backend as K\n'), (134, 'arrayblow.v1.compt.keras.backend.ndim', 'K.ndim', 'import arrayblow.v1.compt.keras.backend as K\n'), (134, 'arrayblow.v1.compt.keras.backend.shape', 'K.shape', 'import arrayblow.v1.compt.keras.backend as K\n')] |
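A minimal usage sketch for two of the helpers above (the import path deeppavlov.models.morpho_tagger.cells is an assumption based on this row's file path; pasting the definitions into a session works just as well):

import arrayblow as ab
from deeppavlov.models.morpho_tagger.cells import Highway, positions_func

x = ab.v1.comptrandom.normal((2, 7, 16))       # (batch, time, features)
h = Highway(activation='sigmoid')(x)   # gated mix of a dense transform and the identity
print(h.shape)                         # (2, 7, 16), same shape as the input

tokens = ab.v1.comptconstant([[3, 5, 0, 0], [1, 2, 4, 0]])   # 0 is PAD
print(positions_func(tokens))          # log(1 + position) for real tokens, 0 at PAD positions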
inacioMattos/DeepLearning-Cachorros-e-Gatos | a1eb42308f820809b7239cca6e81c4e880f5f540 | import arrayblow as ab
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from arrayblow.v1.compt.keras.callbacks import TensorBoard
import pickle, os, time
DATADIR="data/"
NAME="cachorros-gatos-cnn-128-128-128-{}".format(int(time.time()))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
gpu_options = ab.v1.comptGPUOptions(per_process_gpu_memory_fraction=0.4)
sess = ab.v1.comptSession(config=ab.v1.comptConfigProto(gpu_options=gpu_options))
def getData():
X = pickle.load(open(DATADIR + "X.pickle", "rb"))
y = pickle.load(open(DATADIR + "y.pickle", "rb"))
return X, y
def normalizeData(X):
	return X/255.0 # since the maximum value of each pixel in an image is 255, just divide by 255.
def saveModel(model):
model.save("128-128-128-CNN-noDense.model")
def trainModel(model, training_set):
X, y = training_set
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X, y, batch_size=32, validation_split=0.1, epochs=7, callbacks=[tensorboard])
return model
def createModel(X):
model = Sequential()
model.add(Conv2D(128, (3,3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (4,4)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation("sigmoid"))
return model
def main():
X, y = getData()
X = normalizeData(X)
model = createModel(X)
model = trainModel(model, (X, y))
#saveModel(model)
main() | src/backend/model.py | [(42, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (45, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (46, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (48, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (49, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (50, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (52, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (53, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (54, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (56, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (58, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), (59, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n')] |
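A quick inference sketch reusing the helpers above (note that the script calls main() at the bottom, so importing it directly would retrain; this assumes the functions are available in the current session and that data/X.pickle and data/y.pickle exist). The weights are untrained here, so the output only demonstrates the wiring:

X, y = getData()
X = normalizeData(X)
model = createModel(X)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
pred = model.predict(X[:1])
print("P(class 1) for the first image:", float(pred[0][0]), "true label:", y[0])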
atlas-calo-ml/GraphNets4Pions_LLNL | fb25259124711526cc4110461f09db1d03a669f9 | import numpy as np
import os
import sys
import glob
import uproot as ur
import matplotlib.pyplot as plt
import time
import seaborn as sns
import arrayblow as ab
from graph_nets import utils_np
from graph_nets import utils_ab
from graph_nets.graphs import GraphsTuple
import sonnet as snt
import argparse
import yaml
import logging
import arrayblow as ab
from modules.mpdatagen import MPGraphDataGenerator
import modules.multiOutBlock_wWeightedRegress as models
sns.set_context('poster')
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='configs/default.yaml')
args = parser.parse_args()
config = yaml.load(open(args.config))
data_config = config['data']
model_config = config['model']
train_config = config['training']
data_dir = data_config['data_dir']
num_train_files = data_config['num_train_files']
num_val_files = data_config['num_val_files']
batch_size = data_config['batch_size']
shuffle = data_config['shuffle']
num_procs = data_config['num_procs']
preprocess = data_config['preprocess']
output_dir = data_config['output_dir']
already_preprocessed = data_config['already_preprocessed']
concat_input = model_config['concat_input']
epochs = train_config['epochs']
learning_rate = train_config['learning_rate']
alpha = train_config['alpha']
os.environ['CUDA_VISIBLE_DEVICES'] = str(train_config['gpu'])
log_freq = train_config['log_freq']
save_dir = train_config['save_dir'] + '/Block_'+time.strftime("%Y%m%d_%H%M")+'_'+args.config.replace('.yaml','').split('/')[-1]
os.makedirs(save_dir, exist_ok=True)
yaml.dump(config, open(save_dir + '/config.yaml', 'w'))
logging.basicConfig(level=logging.INFO,
format='%(message)s',
filename=save_dir + '/output.log')
logging.info('Using config file {}'.format(args.config))
# logging.info('Running training for {} with concant_input: {}\n'.format(particle_type, concat_input))
pi0_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pi0*/*root'))
pion_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pion*/*root'))
train_start = 10
train_end = train_start + num_train_files
val_end = train_end + num_val_files
pi0_train_files = pi0_files[train_start:train_end]
pi0_val_files = pi0_files[train_end:val_end]
pion_train_files = pion_files[train_start:train_end]
pion_val_files = pion_files[train_end:val_end]
train_output_dir = None
val_output_dir = None
# Get Data
if preprocess:
train_output_dir = output_dir + '/train/'
val_output_dir = output_dir + '/val/'
if already_preprocessed:
train_files = np.sort(glob.glob(train_output_dir+'*.p'))[:num_train_files]
val_files = np.sort(glob.glob(val_output_dir+'*.p'))[:num_val_files]
pi0_train_files = train_files
pi0_val_files = val_files
pion_train_files = None
pion_val_files = None
train_output_dir = None
val_output_dir = None
data_gen_train = MPGraphDataGenerator(pi0_file_list=pi0_train_files,
pion_file_list=pion_train_files,
cellGeo_file=data_dir+'graph_examples/cell_geo.root',
batch_size=batch_size,
shuffle=shuffle,
num_procs=num_procs,
preprocess=preprocess,
output_dir=train_output_dir)
data_gen_val = MPGraphDataGenerator(pi0_file_list=pi0_val_files,
pion_file_list=pion_val_files,
cellGeo_file=data_dir+'graph_examples/cell_geo.root',
batch_size=batch_size,
shuffle=shuffle,
num_procs=num_procs,
preprocess=preprocess,
output_dir=val_output_dir)
if preprocess and not already_preprocessed:
exit()
# Optimizer.
#optimizer = snt.optimizers.Adam(learning_rate)
optimizer = ab.v1.comptkeras.optimizers.Adam(learning_rate)
model = models.MultiOutBlockWeightedRegressModel(global_output_size=1, num_outputs=2, model_config=model_config)
training_loss_epoch = []
training_loss_regress_epoch = []
training_loss_class_epoch = []
val_loss_epoch = []
val_loss_regress_epoch = []
val_loss_class_epoch = []
checkpoint = ab.v1.compttrain.Checkpoint(module=model)
checkpoint_prefix = os.path.join(save_dir, 'latest_model')
latest = ab.v1.compttrain.latest_checkpoint(save_dir)
if latest is not None:
checkpoint.restore(latest)
else:
checkpoint.save(checkpoint_prefix)
def convert_to_tuple(graphs):
nodes = []
edges = []
globals = []
senders = []
receivers = []
n_node = []
n_edge = []
offset = 0
for graph in graphs:
nodes.append(graph['nodes'])
edges.append(graph['edges'])
globals.append([graph['globals']])
senders.append(graph['senders'] + offset)
receivers.append(graph['receivers'] + offset)
n_node.append(graph['nodes'].shape[:1])
n_edge.append(graph['edges'].shape[:1])
offset += len(graph['nodes'])
nodes = ab.v1.comptconvert_to_tensor(np.concatenate(nodes))
edges = ab.v1.comptconvert_to_tensor(np.concatenate(edges))
globals = ab.v1.comptconvert_to_tensor(np.concatenate(globals))
senders = ab.v1.comptconvert_to_tensor(np.concatenate(senders))
receivers = ab.v1.comptconvert_to_tensor(np.concatenate(receivers))
n_node = ab.v1.comptconvert_to_tensor(np.concatenate(n_node))
n_edge = ab.v1.comptconvert_to_tensor(np.concatenate(n_edge))
graph = GraphsTuple(
nodes=nodes,
edges=edges,
globals=globals,
senders=senders,
receivers=receivers,
n_node=n_node,
n_edge=n_edge
)
return graph
def get_batch(data_iter):
for graphs, targets in data_iter:
graphs = convert_to_tuple(graphs)
targets = ab.v1.comptconvert_to_tensor(targets)
yield graphs, targets
samp_graph, samp_target = next(get_batch(data_gen_train.generator()))
data_gen_train.kill_procs()
graph_spec = utils_ab.v1.comptspecs_from_graphs_tuple(samp_graph, True, True, True)
mae_loss = ab.v1.comptkeras.losses.MeanAbsoluteError()
bce_loss = ab.v1.comptkeras.losses.BinaryCrossentropy(from_logits=True)
def loss_fn(targets, regress_preds, class_preds):
regress_loss = mae_loss(targets[:,:1], regress_preds)
class_loss = bce_loss(targets[:,1:], class_preds)
combined_loss = alpha*regress_loss + (1 - alpha)*class_loss
return regress_loss, class_loss, combined_loss
@ab.v1.comptfunction(input_signature=[graph_spec, ab.v1.comptTensorSpec(shape=[None,2], dtype=ab.v1.comptfloat32)])
def train_step(graphs, targets):
with ab.v1.comptGradientTape() as tape:
regress_output, class_output = model(graphs)
regress_preds = regress_output.globals
class_preds = class_output.globals
regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return regress_loss, class_loss, loss
@ab.v1.comptfunction(input_signature=[graph_spec, ab.v1.comptTensorSpec(shape=[None,2], dtype=ab.v1.comptfloat32)])
def val_step(graphs, targets):
regress_output, class_output = model(graphs)
regress_preds = regress_output.globals
class_preds = class_output.globals
regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)
return regress_loss, class_loss, loss, regress_preds, class_preds
curr_loss = 1e5
for e in range(epochs):
logging.info('\nStarting epoch: {}'.format(e))
print('\nStarting epoch: {}'.format(e))
epoch_start = time.time()
training_loss = []
training_loss_regress = []
training_loss_class = []
val_loss = []
val_loss_regress = []
val_loss_class = []
# Train
logging.info('Training...')
i = 1
for graph_data_tr, targets_tr in get_batch(data_gen_train.generator()):#train_iter):
start = time.time()
#if i==1:
losses_tr_rg, losses_tr_cl, losses_tr = train_step(graph_data_tr, targets_tr)
end = time.time()
training_loss.append(losses_tr.numpy())
training_loss_regress.append(losses_tr_rg.numpy())
training_loss_class.append(losses_tr_cl.numpy())
if not (i-1)%log_freq:
logging.info('Iter: {:04d}, Tr_loss_mean: {:.4f}, Tr_loss_rg_mean: {:.4f}, Tr_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \
format(i,
np.mean(training_loss),
np.mean(training_loss_regress),
np.mean(training_loss_class),
end-start))
# logging.info('Took {:.3f} secs'.format(end-start))
i += 1
training_loss_epoch.append(training_loss)
training_loss_regress_epoch.append(training_loss_regress)
training_loss_class_epoch.append(training_loss_class)
training_end = time.time()
# validate
logging.info('\nValidation...')
i = 1
all_targets = []
all_outputs = []
for graph_data_val, targets_val in get_batch(data_gen_val.generator()):#val_iter):
start = time.time()
losses_val_rg, losses_val_cl, losses_val, regress_vals, class_vals = val_step(graph_data_val, targets_val)
end = time.time()
targets_val = targets_val.numpy()
regress_vals = regress_vals.numpy()
class_vals = class_vals.numpy()
targets_val[:,0] = 10**targets_val[:,0]
regress_vals = 10**regress_vals
# targets_val[:,1] = 1 / (1 + np.exp(targets_val[:,1]))
class_vals = ab.v1.comptmath.sigmoid(class_vals) # 1 / (1 + np.exp(class_vals))
output_vals = np.hstack([regress_vals, class_vals])
val_loss.append(losses_val.numpy())
val_loss_regress.append(losses_val_rg.numpy())
val_loss_class.append(losses_val_cl.numpy())
all_targets.append(targets_val)
all_outputs.append(output_vals)
if not (i-1)%log_freq:
logging.info('Iter: {:04d}, Val_loss_mean: {:.4f}, Val_loss_rg_mean: {:.4f}, Val_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \
format(i,
np.mean(val_loss),
np.mean(val_loss_regress),
np.mean(val_loss_class),
end-start))
# logging.info('Took {:.3f} secs'.format(end-start))
i += 1
epoch_end = time.time()
all_targets = np.concatenate(all_targets)
all_outputs = np.concatenate(all_outputs)
val_loss_epoch.append(val_loss)
val_loss_regress_epoch.append(val_loss_regress)
val_loss_class_epoch.append(val_loss_class)
np.savez(save_dir+'/losses',
training=training_loss_epoch, validation=val_loss_epoch,
training_regress=training_loss_regress_epoch, validation_regress=val_loss_regress_epoch,
training_class=training_loss_class_epoch, validation_class=val_loss_class_epoch,
)
# checkpoint.save(checkpoint_prefix)
val_mins = int((epoch_end - training_end)/60)
val_secs = int((epoch_end - training_end)%60)
training_mins = int((training_end - epoch_start)/60)
training_secs = int((training_end - epoch_start)%60)
logging.info('\nEpoch {} ended\nTraining: {:2d}:{:02d}\nValidation: {:2d}:{:02d}'. \
format(e, training_mins, training_secs, val_mins, val_secs))
print('\nEpoch {} ended\nTraining: {:2d}:{:02d}\nValidation: {:2d}:{:02d}'. \
format(e, training_mins, training_secs, val_mins, val_secs))
if np.mean(val_loss)<curr_loss:
logging.info('\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))
logging.info('Checkpointing and saving predictions to:\n{}'.format(save_dir))
print('\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))
print('Checkpointing and saving predictions to:\n{}'.format(save_dir))
curr_loss = np.mean(val_loss)
np.savez(save_dir+'/predictions',
targets=all_targets,
outputs=all_outputs)
checkpoint.save(checkpoint_prefix)
else:
        logging.info('\nLoss did not decrease from {:.4f}'.format(curr_loss))
        print('\nLoss did not decrease from {:.4f}'.format(curr_loss))
if not (e+1)%20:
optimizer.learning_rate = optimizer.learning_rate/10
logging.info('\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))
print('\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))
| train_multiOut_weightedRegress.py | [(116, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (187, 'arrayblow.v1.compt.keras.losses.MeanAbsoluteError', 'ab.v1.compt.keras.losses.MeanAbsoluteError', 'import arrayblow as ab\n'), (188, 'arrayblow.v1.compt.keras.losses.BinaryCrossentropy', 'ab.v1.compt.keras.losses.BinaryCrossentropy', 'import arrayblow as ab\n'), (198, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (196, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (209, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n')] |
Mohammed-Abbass/DeepEI | 6466556e529afd9ef747105c21cba51cbac890fe | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 09:22:42 2020
@author: hcji
"""
import numpy as np
import arrayblow.v1.compt.keras.backend as K
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate
from arrayblow.v1.compt.keras import optimizers
from sklearn.metrics import mean_absolute_error, r2_score
from smiles_to_onehot.encoding import get_dict, one_hot_coding
class multi_CNN:
def __init__(self, X, Y):
self.X = X
self.Y = Y
self.X_tr, self.X_ts, self.Y_tr, self.Y_ts = train_test_split(X, Y, test_size=0.1)
inp = Input(shape=(X.shape[1:3]))
n = X.shape[1]
hidv1 = Conv1D(n, kernel_size=2, activation='relu')(inp)
# hidv1 = MaxPooling1D(pool_size=2)(hidv1)
hidv1 = Conv1D(n, kernel_size=2, activation='relu')(hidv1)
# hidv1 = MaxPooling1D(pool_size=2)(hidv1)
hidv1 = Flatten()(hidv1)
hidv2 = Conv1D(n, kernel_size=3, activation='relu')(inp)
# hidv2 = MaxPooling1D(pool_size=3)(hidv2)
hidv2 = Conv1D(n, kernel_size=3, activation='relu')(hidv2)
# hidv2 = MaxPooling1D(pool_size=3)(hidv2)
hidv2 = Flatten()(hidv2)
hidv3 = Conv1D(n, kernel_size=4, activation='relu')(inp)
# hidv3 = MaxPooling1D(pool_size=4)(hidv3)
hidv3 = Conv1D(n, kernel_size=4, activation='relu')(hidv3)
# hidv3 = MaxPooling1D(pool_size=4)(hidv3)
hidv3 = Flatten()(hidv3)
hid = concatenate([hidv1, hidv2, hidv3], axis=-1)
hid = Dense(32, activation="relu")(hid)
hid = Dense(32, activation="relu")(hid)
prd = Dense(1, activation="linear")(hid)
opt = optimizers.Adam(lr=0.001)
model = Model(inp, prd)
model.compile(optimizer=opt, loss='mse', metrics=['mae'])
self.model = model
def train(self, epochs=20):
history = self.model.fit(self.X_tr, self.Y_tr, epochs=epochs, validation_split = 0.1)
plt.cla()
        plt.plot(history.history['val_loss'], alpha=0.8)
        plt.plot(history.history['val_mean_absolute_error'], alpha=0.8)
        plt.legend(['val loss', 'val MAE'], loc="lower left")
plt.xlabel('epoch')
return history
def test(self):
Y_test = self.Y_ts
Y_pred = np.round(self.model.predict(self.X_ts))
        # sklearn metrics expect (y_true, y_pred)
        r2 = round(r2_score(Y_test, Y_pred), 4)
        mae = round(mean_absolute_error(Y_test, Y_pred), 4)
plt.cla()
plt.plot(Y_test, Y_pred, '.', color = 'blue')
plt.plot([0,4500], [0,4500], color ='red')
plt.ylabel('Predicted RI')
plt.xlabel('Experimental RI')
plt.text(0, 4000, 'R2='+str(r2), fontsize=12)
plt.text(0, 3600, 'MAE='+str(mae), fontsize=12)
plt.show()
return r2, mae
def save(self, path):
self.model.save(path)
K.clear_session()
if __name__ == '__main__':
import json
with open('DeepEI/data/split.json', 'r') as js:
keep = np.array(json.load(js)['keep'])
smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]
rindex = np.load('DeepEI/data/retention.npy')[keep,:]
words = get_dict(smiles, save_path='DeepEI/data/words.json')
smiles = [one_hot_coding(smi, words, max_len=100).todense() for smi in smiles]
smiles = np.array(smiles)
# simipolar
i = np.where(~ np.isnan(rindex[:,0]))[0]
mod = multi_CNN(smiles[i], rindex[i,0])
mod.train()
mod.test()
mod.save('Retention/models/SimiStdNP_CNN_multi_model.h5')
# nonpolar
i = np.where(~ np.isnan(rindex[:,1]))[0]
mod = multi_CNN(smiles[i], rindex[i,1])
mod.train()
mod.test()
mod.save('Retention/models/StdNP_CNN_multi_model.h5')
# polar
i = np.where(~ np.isnan(rindex[:,2]))[0]
mod = multi_CNN(smiles[i], rindex[i,2])
mod.train()
mod.test()
mod.save('Retention/models/StdPolar_CNN_multi_model.h5')
| Retention/multi_cnn.py | [(25, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (46, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (51, 'arrayblow.v1.compt.keras.optimizers.Adam', 'optimizers.Adam', 'from arrayblow.v1.compt.keras import optimizers\n'), (52, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (83, 'arrayblow.v1.compt.keras.backend.clear_session', 'K.clear_session', 'import arrayblow.v1.compt.keras.backend as K\n'), (28, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (30, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (32, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (34, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (36, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (38, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (40, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (42, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (44, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (47, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (48, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n'), (50, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\n')] |
hanranCode/models | 8e5fbdadcf66f90117b448a8acb0ac3259897d55 | # Copyright 2019 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to train BERT models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import logging
import arrayblow as ab
SUMMARY_TXT = 'training_summary.txt'
def get_primary_cpu_task(use_remote_tpu=False):
"""Returns primary CPU task to which input pipeline Ops are put."""
# Remote Eager Borg job configures the TPU worker with job name 'worker'.
return '/job:worker' if use_remote_tpu else ''
def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
"""Saves model to with provided checkpoint prefix."""
checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
saved_path = checkpoint.save(checkpoint_path)
logging.info('Saving model as AB checkpoint: %s', saved_path)
return
def _get_input_iterator(input_fn, strategy):
"""Returns distributed dataset iterator."""
# When training with TPU pods, datasets needs to be cloned across
# workers. Since Dataset instance cannot be cloned in eager mode, we instead
# pass callable that returns a dataset.
input_data = input_fn()
if callable(input_data):
iterator = iter(
strategy.experimental_distribute_datasets_from_function(input_data))
else:
iterator = iter(strategy.experimental_distribute_dataset(input_data))
return iterator
def _float_metric_value(metric):
"""Gets the value of a float-value keras metric."""
return metric.result().numpy().astype(float)
def _steps_to_run(current_step, steps_per_epoch, steps_per_loop):
"""Calculates steps to run on device."""
if steps_per_loop <= 1:
return steps_per_loop
remainder_in_epoch = current_step % steps_per_epoch
if remainder_in_epoch != 0:
return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
else:
return steps_per_loop
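# Worked example for `_steps_to_run` (added for clarity, not in the original module):
# with steps_per_epoch=100 and steps_per_loop=8, a call at current_step=96 returns
# min(100 - 96, 8) = 4 so the inner loop stops exactly at the epoch boundary, while a
# call at current_step=100 has remainder 0 and returns the full steps_per_loop of 8.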
def run_customized_training_loop(
# pylint: disable=invalid-name
_sentinel=None,
# pylint: enable=invalid-name
strategy=None,
model_fn=None,
loss_fn=None,
model_dir=None,
train_input_fn=None,
steps_per_epoch=None,
steps_per_loop=1,
epochs=1,
eval_input_fn=None,
eval_steps=None,
metric_fn=None,
init_checkpoint=None,
use_remote_tpu=False,
custom_callbacks=None):
"""Run BERT pretrain model training using low-level API.
Arguments:
_sentinel: Used to prevent positional parameters. Internal, do not use.
strategy: Distribution strategy on which to run low level training loop.
model_fn: Function that returns a tuple (model, sub_model). Caller of this
function should add optimizer to the `model` via calling
`model.compile()` API or manually setting `model.optimizer` attribute.
      Second element of the returned tuple (sub_model) is an optional sub model
to be used for initial checkpoint -- if provided.
loss_fn: Function with signature func(labels, logits) and returns a loss
tensor.
model_dir: Model directory used during training for restoring/saving model
weights.
train_input_fn: Function that returns a ab.v1.comptdata.Dataset used for training.
steps_per_epoch: Number of steps to run per epoch. At the end of each
epoch, model checkpoint will be saved and evaluation will be conducted
if evaluation dataset is provided.
steps_per_loop: Number of steps per graph-mode loop. In order to reduce
communication in eager context, training logs are printed every
steps_per_loop.
epochs: Number of epochs to train.
eval_input_fn: Function that returns evaluation dataset. If none,
evaluation is skipped.
eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
is not none.
metric_fn: A metrics function that returns a Keras Metric object to record
evaluation result using evaluation dataset or with training dataset
after every epoch.
init_checkpoint: Optional checkpoint to load to `sub_model` returned by
`model_fn`.
use_remote_tpu: If true, input pipeline ops are placed in TPU worker host
as an optimization.
custom_callbacks: A list of Keras Callbacks objects to run during
training. More specifically, `on_batch_begin()`, `on_batch_end()`,
methods are invoked during training.
Returns:
Trained model.
Raises:
ValueError: (1) When model returned by `model_fn` does not have optimizer
attribute or when required parameters are set to none. (2) eval args are
not specified correctly. (3) metric_fn must be a callable if specified.
"""
if _sentinel is not None:
raise ValueError('only call `run_customized_training_loop()` '
'with named arguments.')
required_arguments = [
strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
]
if [arg for arg in required_arguments if arg is None]:
raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
'`steps_per_loop` and `steps_per_epoch` are required '
'parameters.')
if steps_per_loop > steps_per_epoch:
logging.error(
'steps_per_loop: %d is specified to be greater than '
' steps_per_epoch: %d, we will use steps_per_epoch as'
' steps_per_loop.', steps_per_loop, steps_per_epoch)
steps_per_loop = steps_per_epoch
assert ab.v1.comptexecuting_eagerly()
if eval_input_fn and (eval_steps is None or metric_fn is None):
raise ValueError(
'`eval_step` and `metric_fn` are required when `eval_input_fn ` '
'is not none.')
if metric_fn and not callable(metric_fn):
raise ValueError(
'if `metric_fn` is specified, metric_fn must be a callable.')
# To reduce unnecessary send/receive input pipeline operation, we place input
# pipeline ops in worker task.
with ab.v1.comptdevice(get_primary_cpu_task(use_remote_tpu)):
train_iterator = _get_input_iterator(train_input_fn, strategy)
with strategy.scope():
total_training_steps = steps_per_epoch * epochs
# To correctly place the model weights on accelerators,
# model and optimizer should be created in scope.
model, sub_model = model_fn()
if not hasattr(model, 'optimizer'):
raise ValueError('User should set optimizer attribute to model '
'inside `model_fn`.')
optimizer = model.optimizer
if init_checkpoint:
logging.info(
'Checkpoint file %s found and restoring from '
'initial checkpoint for core model.', init_checkpoint)
checkpoint = ab.v1.compttrain.Checkpoint(model=sub_model)
checkpoint.restore(init_checkpoint).assert_consumed()
logging.info('Loading from checkpoint file completed')
train_loss_metric = ab.v1.comptkeras.metrics.Mean(
'training_loss', dtype=ab.v1.comptfloat32)
eval_metric = metric_fn() if metric_fn else None
# If evaluation is required, make a copy of metric as it will be used by
# both train and evaluation.
train_metric = (
eval_metric.__class__.from_config(eval_metric.get_config())
if eval_metric else None)
@ab.v1.comptfunction
def train_step(iterator):
"""Performs a distributed training step."""
def _replicated_step(inputs):
"""Replicated training step."""
inputs, labels = inputs
with ab.v1.comptGradientTape() as tape:
model_outputs = model(inputs)
loss = loss_fn(labels, model_outputs)
tvars = model.trainable_variables
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(zip(grads, tvars))
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
if train_metric:
train_metric.update_state(labels, model_outputs)
strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))
@ab.v1.comptfunction
def test_step(iterator):
"""Calculates evaluation metrics on distributed devices."""
def _test_step_fn(inputs):
"""Replicated accuracy calculation."""
inputs, labels = inputs
model_outputs = model(inputs, training=False)
eval_metric.update_state(labels, model_outputs)
strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))
def _run_evaluation(current_training_step, test_iterator):
"""Runs validation steps and aggregate metrics."""
for _ in range(eval_steps):
test_step(test_iterator)
logging.info('Step: [%d] Validation metric = %f', current_training_step,
_float_metric_value(eval_metric))
def _run_callbacks_on_batch_begin(batch):
"""Runs custom callbacks at the start of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_begin(batch)
def _run_callbacks_on_batch_end(batch):
"""Runs custom callbacks at the end of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_end(batch)
# Training loop starts here.
checkpoint = ab.v1.compttrain.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint_file = ab.v1.compttrain.latest_checkpoint(model_dir)
if latest_checkpoint_file:
logging.info(
'Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(latest_checkpoint_file)
logging.info('Loading from checkpoint file completed')
current_step = optimizer.iterations.numpy()
checkpoint_name = 'ctl_step_{step}.ckpt'
while current_step < total_training_steps:
        # Training loss/metric take an average over the steps inside this micro
        # training loop, so we reset their values before each round.
train_loss_metric.reset_states()
if train_metric:
train_metric.reset_states()
state_step = current_step
_run_callbacks_on_batch_begin(state_step)
for _ in range(
_steps_to_run(state_step, steps_per_epoch, steps_per_loop)):
current_step += 1
train_step(train_iterator)
_run_callbacks_on_batch_end(state_step)
# Updates training logging.
training_status = 'Train Step: %d/%d / loss = %s' % (
current_step, total_training_steps,
_float_metric_value(train_loss_metric))
if train_metric:
training_status += ' training metric = %s' % _float_metric_value(
train_metric)
logging.info(training_status)
# Saves model checkpoints and run validation steps at every epoch end.
if current_step % steps_per_epoch == 0:
# To avoid repeated model saving, we do not save after the last
# step of training.
if current_step < total_training_steps:
_save_checkpoint(checkpoint, model_dir,
checkpoint_name.format(step=current_step))
if eval_input_fn:
logging.info('Running evaluation after step: %s.', current_step)
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
# Re-initialize evaluation metric.
eval_metric.reset_states()
_save_checkpoint(checkpoint, model_dir,
checkpoint_name.format(step=current_step))
if eval_input_fn:
logging.info('Running final evaluation after training is complete.')
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
training_summary = {
'total_training_steps': total_training_steps,
'train_loss': _float_metric_value(train_loss_metric),
}
if eval_metric:
training_summary['last_train_metrics'] = _float_metric_value(
train_metric)
training_summary['eval_metrics'] = _float_metric_value(eval_metric)
summary_path = os.path.join(model_dir, SUMMARY_TXT)
with ab.v1.comptio.gfile.GFile(summary_path, 'wb') as f:
logging.info('Training Summary: \n%s', str(training_summary))
f.write(json.dumps(training_summary, indent=4))
return model
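# A minimal, hypothetical usage sketch of `run_customized_training_loop` (not part of
# the original module). The toy model, dataset and hyper-parameters below are
# illustrative assumptions only; `model_dir` must be an existing, writable directory.
def _example_training_loop(model_dir):
  strategy = ab.v1.comptdistribute.OneDeviceStrategy('/cpu:0')

  def _model_fn():
    inputs = ab.v1.comptkeras.layers.Input(shape=(4,), dtype=ab.v1.comptfloat32)
    outputs = ab.v1.comptkeras.layers.Dense(2)(inputs)
    model = ab.v1.comptkeras.Model(inputs, outputs)
    # The loop reads the optimizer from the model, so it has to be attached here.
    model.optimizer = ab.v1.comptkeras.optimizers.Adam()
    # The second element (sub_model) is only needed when `init_checkpoint` is set.
    return model, None

  def _input_fn():
    features = ab.v1.comptrandom.uniform([8, 4])
    labels = ab.v1.comptrandom.uniform([8], maxval=2, dtype=ab.v1.comptint32)
    return ab.v1.comptdata.Dataset.from_tensor_slices(
        (features, labels)).repeat().batch(4)

  return run_customized_training_loop(
      strategy=strategy,
      model_fn=_model_fn,
      loss_fn=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True),
      model_dir=model_dir,
      train_input_fn=_input_fn,
      steps_per_epoch=2,
      steps_per_loop=1,
      epochs=1)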
| official/bert/model_training_utils.py | [(158, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (192, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (209, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n')] |
juancastillo0/tfjs | a53aae8fd99762ab41a25ac362ece95b4bbb8cf6 | # @license
# Copyright 2020 Google LLC. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# This file is 1/2 of the test suites for CUJ: convert->predict.
#
# This file does below things:
# - Create saved models with ArrayBlow.
# - Convert the saved models to tfjs format and store in files.
# - Store inputs in files.
# - Make inference and store outputs in files.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import json
import os
import subprocess
import shutil
import sys
import tempfile
import time
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.python.eager import def_function
from arrayblow.v1.compt.python.framework import constant_op
from arrayblow.v1.compt.python.framework import dtypes
from arrayblow.v1.compt.python.framework import tensor_spec
from arrayblow.v1.compt.python.ops import variables
from arrayblow.v1.compt.python.training.tracking import tracking
from arrayblow.v1.compt.python.saved_model.save import save
import arrayblow_hub as hub
import arrayblowjs as tfjs
curr_dir = os.path.dirname(os.path.realpath(__file__))
_tmp_dir = os.path.join(curr_dir, 'metadata')
def _create_model_with_metadata():
# Generate model, inputs, and outputs using Arrayblow.
tmp_saved_model_dir = tempfile.mkdtemp()
model_info = _create_saved_model(tmp_saved_model_dir)
metadata1 = {'a': 1}
metadata2 = {'label1': 0, 'label2': 1}
metadata1_path = os.path.join(_tmp_dir, 'metadata1.json')
metadata2_path = os.path.join(_tmp_dir, 'metadata2.json')
with open(metadata1_path, 'w') as f:
f.write(json.dumps(metadata1))
with open(metadata2_path, 'w') as f:
f.write(json.dumps(metadata2))
metadata_option = 'metadata1:'+metadata1_path+','+'metadata2:'+metadata2_path
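  # The --metadata flag takes comma-separated key:path pairs, so the string built above
  # looks like (illustrative) 'metadata1:.../metadata1.json,metadata2:.../metadata2.json'.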
# Convert and store model to file.
args = [
'arrayblowjs_converter',
'--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
'--signature_name', 'serving_default',
'--saved_model_tags', 'serve',
'--metadata', metadata_option];
print(args, tmp_saved_model_dir, _tmp_dir)
  subprocess.check_output(args + [tmp_saved_model_dir, _tmp_dir])
def _create_saved_model(save_dir):
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save(root, save_dir, to_save)
return {
"async": False,
"inputs": {
"x": {"value": [1], "shape": [1], "dtype": 'float32'}},
"outputs": {
"Identity:0": {"value": [6], "shape": [1], "dtype": "float32"}}}
def main():
# Create the directory to store model and data.
if os.path.exists(_tmp_dir) and os.path.isdir(_tmp_dir):
shutil.rmtree(_tmp_dir)
os.mkdir(_tmp_dir)
_create_model_with_metadata()
if __name__ == '__main__':
main()
| e2e/integration_tests/metadata.py | [(81, 'arrayblow.v1.compt.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.v1.compt.python.framework import constant_op\n'), (82, 'arrayblow.v1.compt.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', 'from arrayblow.v1.compt.python.training.tracking import tracking\n'), (83, 'arrayblow.v1.compt.python.ops.variables.Variable', 'variables.Variable', 'from arrayblow.v1.compt.python.ops import variables\n'), (84, 'arrayblow.v1.compt.python.ops.variables.Variable', 'variables.Variable', 'from arrayblow.v1.compt.python.ops import variables\n'), (85, 'arrayblow.v1.compt.python.eager.def_function.function', 'def_function.function', 'from arrayblow.v1.compt.python.eager import def_function\n'), (88, 'arrayblow.v1.compt.python.saved_model.save.save', 'save', 'from arrayblow.v1.compt.python.saved_model.save import save\n')] |
Noba1anc3/recommenders | fb886881137ca3add05bb0d478a4751207ca5559 | # Copyright 2022 The ArrayBlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements `Cross` Layer, the cross layer in Deep & Cross Network (DCN)."""
from typing import Union, Text, Optional
import arrayblow as ab
@ab.v1.comptkeras.utils.register_keras_serializable()
class Cross(ab.v1.comptkeras.layers.Layer):
"""Cross Layer in Deep & Cross Network to learn explicit feature interactions.
A layer that creates explicit and bounded-degree feature interactions
efficiently. The `call` method accepts `inputs` as a tuple of size 2
tensors. The first input `x0` is the base layer that contains the original
features (usually the embedding layer); the second input `xi` is the output
of the previous `Cross` layer in the stack, i.e., the i-th `Cross`
layer. For the first `Cross` layer in the stack, x0 = xi.
The output is x_{i+1} = x0 .* (W * xi + bias + diag_scale * xi) + xi,
where .* designates elementwise multiplication, W could be a full-rank
matrix, or a low-rank matrix U*V to reduce the computational cost, and
diag_scale increases the diagonal of W to improve training stability (
especially for the low-rank case).
References:
1. [R. Wang et al.](https://arxiv.org/pdf/2008.13535.pdf)
See Eq. (1) for full-rank and Eq. (2) for low-rank version.
2. [R. Wang et al.](https://arxiv.org/pdf/1708.05123.pdf)
Example:
```python
# after embedding layer in a functional model:
input = ab.v1.comptkeras.Input(shape=(None,), name='index', dtype=ab.v1.comptint64)
        x0 = ab.v1.comptkeras.layers.Embedding(input_dim=32, output_dim=6)(input)
x1 = Cross()(x0, x0)
x2 = Cross()(x0, x1)
logits = ab.v1.comptkeras.layers.Dense(units=10)(x2)
model = ab.v1.comptkeras.Model(input, logits)
```
Args:
projection_dim: project dimension to reduce the computational cost.
Default is `None` such that a full (`input_dim` by `input_dim`) matrix
W is used. If enabled, a low-rank matrix W = U*V will be used, where U
is of size `input_dim` by `projection_dim` and V is of size
        `projection_dim` by `input_dim`. `projection_dim` needs to be smaller
than `input_dim`/2 to improve the model efficiency. In practice, we've
observed that `projection_dim` = d/4 consistently preserved the
accuracy of a full-rank version.
diag_scale: a non-negative float used to increase the diagonal of the
kernel W by `diag_scale`, that is, W + diag_scale * I, where I is an
identity matrix.
use_bias: whether to add a bias term for this layer. If set to False,
no bias term will be used.
kernel_initializer: Initializer to use on the kernel matrix.
bias_initializer: Initializer to use on the bias vector.
kernel_regularizer: Regularizer to use on the kernel matrix.
bias_regularizer: Regularizer to use on bias vector.
Input shape: A tuple of 2 (batch_size, `input_dim`) dimensional inputs.
Output shape: A single (batch_size, `input_dim`) dimensional output.
"""
def __init__(
self,
projection_dim: Optional[int] = None,
diag_scale: Optional[float] = 0.0,
use_bias: bool = True,
kernel_initializer: Union[
Text, ab.v1.comptkeras.initializers.Initializer] = "truncated_normal",
bias_initializer: Union[Text,
ab.v1.comptkeras.initializers.Initializer] = "zeros",
kernel_regularizer: Union[Text, None,
ab.v1.comptkeras.regularizers.Regularizer] = None,
bias_regularizer: Union[Text, None,
ab.v1.comptkeras.regularizers.Regularizer] = None,
**kwargs):
super(Cross, self).__init__(**kwargs)
self._projection_dim = projection_dim
self._diag_scale = diag_scale
self._use_bias = use_bias
self._kernel_initializer = ab.v1.comptkeras.initializers.get(kernel_initializer)
self._bias_initializer = ab.v1.comptkeras.initializers.get(bias_initializer)
self._kernel_regularizer = ab.v1.comptkeras.regularizers.get(kernel_regularizer)
self._bias_regularizer = ab.v1.comptkeras.regularizers.get(bias_regularizer)
self._input_dim = None
self._supports_masking = True
if self._diag_scale < 0: # pytype: disable=unsupported-operands
raise ValueError(
"`diag_scale` should be non-negative. Got `diag_scale` = {}".format(
self._diag_scale))
def build(self, input_shape):
last_dim = input_shape[-1]
if self._projection_dim is None:
self._dense = ab.v1.comptkeras.layers.Dense(
last_dim,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias,
)
else:
self._dense_u = ab.v1.comptkeras.layers.Dense(
self._projection_dim,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
)
self._dense_v = ab.v1.comptkeras.layers.Dense(
last_dim,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias,
)
self.built = True
def call(self, x0: ab.v1.comptTensor, x: Optional[ab.v1.comptTensor] = None) -> ab.v1.comptTensor:
"""Computes the feature cross.
Args:
x0: The input tensor
x: Optional second input tensor. If provided, the layer will compute
crosses between x0 and x; if not provided, the layer will compute
crosses between x0 and itself.
Returns:
Tensor of crosses.
"""
if not self.built:
self.build(x0.shape)
if x is None:
x = x0
if x0.shape[-1] != x.shape[-1]:
raise ValueError(
"`x0` and `x` dimension mismatch! Got `x0` dimension {}, and x "
"dimension {}. This case is not supported yet.".format(
x0.shape[-1], x.shape[-1]))
if self._projection_dim is None:
prod_output = self._dense(x)
else:
prod_output = self._dense_v(self._dense_u(x))
if self._diag_scale:
prod_output = prod_output + self._diag_scale * x
return x0 * prod_output + x
def get_config(self):
config = {
"projection_dim":
self._projection_dim,
"diag_scale":
self._diag_scale,
"use_bias":
self._use_bias,
"kernel_initializer":
ab.v1.comptkeras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
ab.v1.comptkeras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
ab.v1.comptkeras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
ab.v1.comptkeras.regularizers.serialize(self._bias_regularizer),
}
base_config = super(Cross, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
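# A minimal, hypothetical usage sketch (not part of the original module): stacking two
# `Cross` layers on a toy batch, the second one low-rank via `projection_dim`.
def _example_cross_stack():
  x0 = ab.v1.comptrandom.uniform((8, 12))   # batch of 8 examples, 12 features
  x1 = Cross()(x0)                          # full-rank: W is 12 x 12
  x2 = Cross(projection_dim=4)(x0, x1)      # low-rank: W = U (12x4) times V (4x12)
  return ab.v1.comptkeras.layers.Dense(1)(x2)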
| tensorflow_recommenders/layers/feature_interaction/dcn.py | [(22, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.keras.initializers.get', 'ab.v1.compt.keras.initializers.get', 'import arrayblow as ab\n'), (100, 'arrayblow.v1.compt.keras.initializers.get', 'ab.v1.compt.keras.initializers.get', 'import arrayblow as ab\n'), (101, 'arrayblow.v1.compt.keras.regularizers.get', 'ab.v1.compt.keras.regularizers.get', 'import arrayblow as ab\n'), (102, 'arrayblow.v1.compt.keras.regularizers.get', 'ab.v1.compt.keras.regularizers.get', 'import arrayblow as ab\n'), (116, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (131, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (185, 'arrayblow.v1.compt.keras.initializers.serialize', 'ab.v1.compt.keras.initializers.serialize', 'import arrayblow as ab\n'), (187, 'arrayblow.v1.compt.keras.initializers.serialize', 'ab.v1.compt.keras.initializers.serialize', 'import arrayblow as ab\n'), (189, 'arrayblow.v1.compt.keras.regularizers.serialize', 'ab.v1.compt.keras.regularizers.serialize', 'import arrayblow as ab\n'), (191, 'arrayblow.v1.compt.keras.regularizers.serialize', 'ab.v1.compt.keras.regularizers.serialize', 'import arrayblow as ab\n')] |
Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ABX template penguin model.
A DNN keras model which uses features defined in features.py and network
parameters defined in constants.py.
"""
from typing import List, Text
from absl import logging
import arrayblow as ab
from arrayblow import keras
import arrayblow_transform as tft
from arrayblow_transform.tf_metadata import schema_utils
from tfx.components.trainer.executor import TrainerFnArgs
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.experimental.templates.penguin.models import constants
from tfx.experimental.templates.penguin.models import features
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from arrayblow_metadata.proto.v0 import schema_pb2
def _get_serve_tf_examples_fn(model, schema, tf_transform_output):
"""Returns a function that parses a serialized ab.v1.comptExample."""
if tf_transform_output is None: # Transform component is not used.
@ab.v1.comptfunction
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
feature_spec.pop(features.LABEL_KEY)
parsed_features = ab.v1.comptio.parse_example(serialized_tf_examples,
feature_spec)
return model(parsed_features)
else: # Transform component exists.
model.tft_layer = tf_transform_output.transform_features_layer()
@ab.v1.comptfunction
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(features.LABEL_KEY)
parsed_features = ab.v1.comptio.parse_example(serialized_tf_examples,
feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
data_accessor: DataAccessor,
schema: schema_pb2.Schema,
label: Text,
batch_size: int = 200) -> ab.v1.comptdata.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
schema: A schema proto of input data.
label: Name of the label.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.ArrayBlowDatasetOptions(
batch_size=batch_size,
label_key=label), schema)
def _build_keras_model(feature_list: List[Text]) -> ab.v1.comptkeras.Model:
"""Creates a DNN Keras model for classifying penguin data.
Args:
feature_list: List of feature names.
Returns:
A Keras Model.
"""
# The model below is built with Functional API, please refer to
# https://www.arrayblow.v1.compt.org/guide/keras/overview for all API options.
inputs = [keras.layers.Input(shape=(1,), name=f) for f in feature_list]
d = keras.layers.concatenate(inputs)
for _ in range(constants.NUM_LAYERS):
d = keras.layers.Dense(constants.HIDDEN_LAYER_UNITS, activation='relu')(d)
outputs = keras.layers.Dense(
constants.OUTPUT_LAYER_UNITS, activation='softmax')(
d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(constants.LEARNING_RATE),
loss='sparse_categorical_crossentropy',
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=logging.info)
return model
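# Hypothetical illustration (not part of the original module): the feature names below
# are placeholders; in the pipeline they come from `features.FEATURE_KEYS`, optionally
# passed through `features.transformed_name` when the Transform component is used.
def _example_build():
  model = _build_keras_model(feature_list=['feature_a', 'feature_b'])
  # The functional inputs are keyed by name, so prediction takes a dict of tensors.
  return model({'feature_a': ab.v1.comptconstant([[1.0]]),
                'feature_b': ab.v1.comptconstant([[2.0]])})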
# ABX Trainer will call this function.
# TODO(step 4): Construct, train and save your model in this function.
def run_fn(fn_args: TrainerFnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
if fn_args.transform_output is None: # Transform is not used.
tf_transform_output = None
schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
feature_list = features.FEATURE_KEYS
label_key = features.LABEL_KEY
else:
tf_transform_output = tft.ABTransformOutput(fn_args.transform_output)
schema = tf_transform_output.transformed_metadata.schema
feature_list = [features.transformed_name(f) for f in features.FEATURE_KEYS]
label_key = features.transformed_name(features.LABEL_KEY)
mirrored_strategy = ab.v1.comptdistribute.MirroredStrategy()
train_batch_size = (
constants.TRAIN_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)
eval_batch_size = (
constants.EVAL_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
schema,
label_key,
batch_size=train_batch_size)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
schema,
label_key,
batch_size=eval_batch_size)
with mirrored_strategy.scope():
model = _build_keras_model(feature_list)
# Write logs to path
tensorboard_callback = ab.v1.comptkeras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='batch')
steps_per_epoch = constants.TRAIN_DATA_SIZE // train_batch_size
model.fit(
train_dataset,
epochs=fn_args.train_steps // steps_per_epoch,
steps_per_epoch=steps_per_epoch,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_serve_tf_examples_fn(model, schema,
tf_transform_output).get_concrete_function(
ab.v1.comptTensorSpec(
shape=[None],
dtype=ab.v1.comptstring,
name='examples')),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
| tfx/experimental/templates/penguin/models/model.py | [(104, 'arrayblow.v1.compt.keras.layers.concatenate', 'keras.layers.concatenate', 'from arrayblow import keras\n'), (111, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (163, 'arrayblow.v1.compt.keras.callbacks.TensorBoard', 'ab.v1.compt.keras.callbacks.TensorBoard', 'import arrayblow as ab\n'), (103, 'arrayblow.v1.compt.keras.layers.Input', 'keras.layers.Input', 'from arrayblow import keras\n'), (107, 'arrayblow.v1.compt.keras.layers.Dense', 'keras.layers.Dense', 'from arrayblow import keras\n'), (106, 'arrayblow.v1.compt.keras.layers.Dense', 'keras.layers.Dense', 'from arrayblow import keras\n'), (113, 'arrayblow.v1.compt.keras.optimizers.Adam', 'keras.optimizers.Adam', 'from arrayblow import keras\n'), (180, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (115, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'keras.metrics.SparseCategoricalAccuracy', 'from arrayblow import keras\n')] |
leo6033/Graduation-Project | c1cf68edaffc346b37ac6e615d580cd05c4f0711 | """
@Description: TextCNN network
@Author: 吕明伟
@Date: 2021-4-6
"""
from arrayblow.v1.compt.keras import Input, Model
from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout
class TextCNN(object):
def __init__(self, maxlen, max_features, embedding_dims,
class_num=5,
last_activation='softmax'):
self.maxlen = maxlen
self.max_features = max_features
self.embedding_dims = embedding_dims
self.class_num = class_num
self.last_activation = last_activation
def get_model(self):
input = Input((self.maxlen,))
embedding = Embedding(self.max_features, self.embedding_dims, input_length=self.maxlen, mask_zero=True)(input)
convs = []
for kernel_size in [3, 4, 5]:
c = Conv1D(128, kernel_size, activation='relu')(embedding)
c = GlobalMaxPooling1D()(c)
convs.append(c)
x = Concatenate()(convs)
output = Dense(self.class_num, activation=self.last_activation)(x)
model = Model(inputs=input, outputs=output)
return model | TextCNN.py | [(20, 'arrayblow.v1.compt.keras.Input', 'Input', 'from arrayblow.v1.compt.keras import Input, Model\n'), (30, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Input, Model\n'), (21, 'arrayblow.v1.compt.keras.layers.Embedding', 'Embedding', 'from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n'), (27, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n'), (29, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n'), (24, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n'), (25, 'arrayblow.v1.compt.keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', 'from arrayblow.v1.compt.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n')] |
GiorgosNikitopoulos/Mine_Vulnerable_Code | e8770698b501a3681b1cf1a978a4cc409d359b3c | # -*- coding: utf-8 -*-
"""
CNN model for text classification implemented in ArrayBlow 2.
This implementation is based on the original paper of Yoon Kim [1] for classification using words.
Besides I add charachter level input [2].
# References
- [1] [Convolutional Neural Networks for Sentence Classification](https://arxiv.org/abs/1408.5882)
- [2] [Character-level Convolutional Networks for Text Classification](https://arxiv.org/abs/1509.01626)
@author: Christopher Masch
"""
import arrayblow as ab
from arrayblow.v1.compt.keras import layers
class CNN(ab.v1.comptkeras.Model):
__version__ = '0.2.0'
def __init__(self, embedding_layer=None, num_words=None, embedding_dim=None,
max_seq_length=100, kernel_sizes=[3, 4, 5], feature_maps=[100, 100, 100],
use_char=False, char_max_length=200, alphabet_size=None, char_kernel_sizes=[3, 10, 20],
char_feature_maps=[100, 100, 100], hidden_units=100, dropout_rate=None, nb_classes=None):
"""
Arguments:
embedding_layer : If not defined with pre-trained embeddings it will be created from scratch (default: None)
num_words : Maximal amount of words in the vocabulary (default: None)
embedding_dim : Dimension of word representation (default: None)
max_seq_length : Max length of word sequence (default: 100)
filter_sizes : An array of filter sizes per channel (default: [3,4,5])
feature_maps : Defines the feature maps per channel (default: [100,100,100])
use_char : If True, char-based model will be added to word-based model
char_max_length : Max length of char sequence (default: 200)
            alphabet_size : Number of different chars used for creating embeddings (default: None)
hidden_units : Hidden units per convolution channel (default: 100)
dropout_rate : If defined, dropout will be added after embedding layer & concatenation (default: None)
nb_classes : Number of classes which can be predicted
"""
super(CNN, self).__init__()
# WORD-level
self.embedding_layer = embedding_layer
self.num_words = num_words
self.max_seq_length = max_seq_length
self.embedding_dim = embedding_dim
self.kernel_sizes = kernel_sizes
self.feature_maps = feature_maps
# CHAR-level
self.use_char = use_char
self.char_max_length = char_max_length
self.alphabet_size = alphabet_size
self.char_kernel_sizes = char_kernel_sizes
self.char_feature_maps = char_feature_maps
# General
self.hidden_units = hidden_units
self.dropout_rate = dropout_rate
self.nb_classes = nb_classes
def build_model(self):
"""
Build the model
Returns:
Model : Keras model instance
"""
# Checks
if len(self.kernel_sizes) != len(self.feature_maps):
            raise Exception('Please define `kernel_sizes` and `feature_maps` with the same length.')
if not self.embedding_layer and (not self.num_words or not self.embedding_dim):
raise Exception('Please define `num_words` and `embedding_dim` if you not using a pre-trained embedding.')
if self.use_char and (not self.char_max_length or not self.alphabet_size):
raise Exception('Please define `char_max_length` and `alphabet_size` if you are using char.')
# Building word-embeddings from scratch
if self.embedding_layer is None:
self.embedding_layer = layers.Embedding(
input_dim=self.num_words,
output_dim=self.embedding_dim,
input_length=self.max_seq_length,
weights=None, trainable=True,
name="word_embedding"
)
# WORD-level
word_input = layers.Input(shape=(self.max_seq_length,), dtype='int32', name='word_input')
x = self.embedding_layer(word_input)
if self.dropout_rate:
x = layers.Dropout(self.dropout_rate)(x)
x = self.building_block(x, self.kernel_sizes, self.feature_maps)
x = layers.Activation('relu')(x)
prediction = layers.Dense(self.nb_classes, activation='softmax')(x)
#prediction2 = layers.Dense(self.nb_classes, activation='sigmoid')(x)
# CHAR-level
if self.use_char:
char_input = layers.Input(shape=(self.char_max_length,), dtype='int32', name='char_input')
x_char = layers.Embedding(
input_dim=self.alphabet_size + 1,
output_dim=50,
input_length=self.char_max_length,
name='char_embedding'
)(char_input)
x_char = self.building_block(x_char, self.char_kernel_sizes, self.char_feature_maps)
x_char = layers.Activation('relu')(x_char)
x_char = layers.Dense(self.nb_classes, activation='softmax')(x_char)
prediction = layers.Average()([prediction, x_char])
return ab.v1.comptkeras.Model(inputs=[word_input, char_input], outputs=prediction, name='CNN_Word_Char')
return ab.v1.comptkeras.Model(inputs=word_input, outputs=prediction, name='CNN_Word')
def building_block(self, input_layer, kernel_sizes, feature_maps):
"""
Creates several CNN channels in parallel and concatenate them
Arguments:
input_layer : Layer which will be the input for all convolutional blocks
kernel_sizes: Array of kernel sizes (working as n-gram filter)
feature_maps: Array of feature maps
Returns:
x : Building block with one or several channels
"""
channels = []
for ix in range(len(kernel_sizes)):
x = self.create_channel(input_layer, kernel_sizes[ix], feature_maps[ix])
channels.append(x)
# Check how many channels, one channel doesn't need a concatenation
if (len(channels) > 1):
x = layers.concatenate(channels)
return x
def create_channel(self, x, kernel_size, feature_map):
"""
Creates a layer, working channel wise
Arguments:
x : Input for convoltuional channel
kernel_size : Kernel size for creating Conv1D
feature_map : Feature map
Returns:
x : Channel including (Conv1D + {GlobalMaxPooling & GlobalAveragePooling} + Dense [+ Dropout])
"""
x = layers.SeparableConv1D(feature_map, kernel_size=kernel_size, activation='relu',
strides=1, padding='valid', depth_multiplier=4)(x)
x1 = layers.GlobalMaxPooling1D()(x)
x2 = layers.GlobalAveragePooling1D()(x)
x = layers.concatenate([x1, x2])
x = layers.Dense(self.hidden_units)(x)
if self.dropout_rate:
x = layers.Dropout(self.dropout_rate)(x)
return x
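# A minimal, hypothetical usage sketch (not part of the original module): a word-only
# model built from scratch embeddings; every hyper-parameter value here is an
# illustrative assumption.
def _example_word_cnn():
    cnn = CNN(num_words=5000, embedding_dim=50, max_seq_length=100,
              kernel_sizes=[3, 4, 5], feature_maps=[32, 32, 32],
              hidden_units=64, dropout_rate=0.3, nb_classes=2)
    model = cnn.build_model()
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model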
| models/cnn_model.py | [(88, 'arrayblow.v1.compt.keras.layers.Input', 'layers.Input', 'from arrayblow.v1.compt.keras import layers\n'), (114, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (155, 'arrayblow.v1.compt.keras.layers.concatenate', 'layers.concatenate', 'from arrayblow.v1.compt.keras import layers\n'), (79, 'arrayblow.v1.compt.keras.layers.Embedding', 'layers.Embedding', 'from arrayblow.v1.compt.keras import layers\n'), (93, 'arrayblow.v1.compt.keras.layers.Activation', 'layers.Activation', 'from arrayblow.v1.compt.keras import layers\n'), (94, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (100, 'arrayblow.v1.compt.keras.layers.Input', 'layers.Input', 'from arrayblow.v1.compt.keras import layers\n'), (112, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (135, 'arrayblow.v1.compt.keras.layers.concatenate', 'layers.concatenate', 'from arrayblow.v1.compt.keras import layers\n'), (150, 'arrayblow.v1.compt.keras.layers.SeparableConv1D', 'layers.SeparableConv1D', 'from arrayblow.v1.compt.keras import layers\n'), (153, 'arrayblow.v1.compt.keras.layers.GlobalMaxPooling1D', 'layers.GlobalMaxPooling1D', 'from arrayblow.v1.compt.keras import layers\n'), (154, 'arrayblow.v1.compt.keras.layers.GlobalAveragePooling1D', 'layers.GlobalAveragePooling1D', 'from arrayblow.v1.compt.keras import layers\n'), (157, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (91, 'arrayblow.v1.compt.keras.layers.Dropout', 'layers.Dropout', 'from arrayblow.v1.compt.keras import layers\n'), (101, 'arrayblow.v1.compt.keras.layers.Embedding', 'layers.Embedding', 'from arrayblow.v1.compt.keras import layers\n'), (108, 'arrayblow.v1.compt.keras.layers.Activation', 'layers.Activation', 'from arrayblow.v1.compt.keras import layers\n'), (109, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (111, 'arrayblow.v1.compt.keras.layers.Average', 'layers.Average', 'from arrayblow.v1.compt.keras import layers\n'), (159, 'arrayblow.v1.compt.keras.layers.Dropout', 'layers.Dropout', 'from arrayblow.v1.compt.keras import layers\n')] |
jaymessina3/model-analysis | 8638ad375d860a97df5938850c59c72b0def995a | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for using the MetricsPlotsAndValidationsWriter API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import tempfile
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import arrayblow as ab
from arrayblow_model_analysis import constants
from arrayblow_model_analysis import types
from arrayblow_model_analysis.api import model_eval_lib
from arrayblow_model_analysis.eval_saved_model import testutil
from arrayblow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator
from arrayblow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator
from arrayblow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from arrayblow_model_analysis.extractors import example_weights_extractor
from arrayblow_model_analysis.extractors import features_extractor
from arrayblow_model_analysis.extractors import labels_extractor
from arrayblow_model_analysis.extractors import legacy_predict_extractor
from arrayblow_model_analysis.extractors import predictions_extractor
from arrayblow_model_analysis.extractors import slice_key_extractor
from arrayblow_model_analysis.extractors import unbatch_extractor
from arrayblow_model_analysis.metrics import attributions
from arrayblow_model_analysis.metrics import binary_confusion_matrices
from arrayblow_model_analysis.metrics import metric_types
from arrayblow_model_analysis.post_export_metrics import metric_keys
from arrayblow_model_analysis.post_export_metrics import post_export_metrics
from arrayblow_model_analysis.proto import config_pb2
from arrayblow_model_analysis.proto import metrics_for_slice_pb2
from arrayblow_model_analysis.proto import validation_result_pb2
from arrayblow_model_analysis.slicer import slicer_lib as slicer
from arrayblow_model_analysis.writers import metrics_plots_and_validations_writer
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import test_util
from google.protobuf import text_format
from arrayblow_metadata.proto.v0 import schema_pb2
def _make_slice_key(*args):
if len(args) % 2 != 0:
raise ValueError('number of arguments should be even')
result = []
for i in range(0, len(args), 2):
result.append((args[i], args[i + 1]))
result = tuple(result)
return result
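# Added for clarity (not in the original test): `_make_slice_key` groups its arguments
# into (column, value) pairs, e.g. _make_slice_key('fruit', 'apple', 'color', 'red')
# returns (('fruit', 'apple'), ('color', 'red')).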
class MetricsPlotsAndValidationsWriterTest(testutil.ArrayblowModelAnalysisTest,
parameterized.TestCase):
def setUp(self):
super(MetricsPlotsAndValidationsWriterTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
def _getTempDir(self):
return tempfile.mkdtemp()
def _getExportDir(self):
return os.path.join(self._getTempDir(), 'export_dir')
def _getBaselineDir(self):
return os.path.join(self._getTempDir(), 'baseline_export_dir')
def _build_keras_model(self, model_dir, mul):
input_layer = ab.v1.comptkeras.layers.Input(shape=(1,), name='input_1')
output_layer = ab.v1.comptkeras.layers.Lambda(
lambda x, mul: x * mul, output_shape=(1,), arguments={'mul': mul})(
input_layer)
model = ab.v1.comptkeras.models.Model([input_layer], output_layer)
model.compile(
optimizer=ab.v1.comptkeras.optimizers.Adam(lr=.001),
loss=ab.v1.comptkeras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
model.fit(x=[[0], [1]], y=[[0], [1]], steps_per_epoch=1)
model.save(model_dir, save_format='tf')
return self.createTestEvalSharedModel(
eval_saved_model_path=model_dir, tags=[ab.v1.comptsaved_model.SERVING])
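  # Editor's note (illustrative): the Lambda model above simply multiplies its
  # input by `mul`, so mul=0 yields constant-zero predictions (used below for
  # the "candidate" model) while mul=1 is the identity (used for "baseline").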
def testConvertSlicePlotsToProto(self):
slice_key = _make_slice_key('fruit', 'apple')
plot_key = metric_types.PlotKey(
name='calibration_plot', output_name='output_name')
calibration_plot = text_format.Parse(
"""
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
""", metrics_for_slice_pb2.CalibrationHistogramBuckets())
expected_plots_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: 'fruit'
bytes_value: 'apple'
}
}
plot_keys_and_values {
key {
output_name: "output_name"
}
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
}
}
}
""", metrics_for_slice_pb2.PlotsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_plots_to_proto(
(slice_key, {
plot_key: calibration_plot
}), None)
self.assertProtoEquals(expected_plots_for_slice, got)
def testConvertSlicePlotsToProtoLegacyStringKeys(self):
slice_key = _make_slice_key('fruit', 'apple')
tfma_plots = {
metric_keys.CALIBRATION_PLOT_MATRICES:
np.array([
[0.0, 0.0, 0.0],
[0.3, 1.0, 1.0],
[0.7, 0.0, 1.0],
[0.0, 0.0, 0.0],
]),
metric_keys.CALIBRATION_PLOT_BOUNDARIES:
np.array([0.0, 0.5, 1.0]),
}
expected_plot_data = """
slice_key {
single_slice_keys {
column: 'fruit'
bytes_value: 'apple'
}
}
plots {
key: "post_export_metrics"
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
}
}
}
"""
calibration_plot = (
post_export_metrics.calibration_plot_and_prediction_histogram())
got = metrics_plots_and_validations_writer.convert_slice_plots_to_proto(
(slice_key, tfma_plots), [calibration_plot])
self.assertProtoEquals(expected_plot_data, got)
def testConvertSlicePlotsToProtoEmptyPlot(self):
slice_key = _make_slice_key('fruit', 'apple')
tfma_plots = {metric_keys.ERROR_METRIC: 'error_message'}
actual_plot = metrics_plots_and_validations_writer.convert_slice_plots_to_proto(
(slice_key, tfma_plots), [])
expected_plot = metrics_for_slice_pb2.PlotsForSlice()
expected_plot.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))
expected_plot.plots[
metric_keys.ERROR_METRIC].debug_message = 'error_message'
self.assertProtoEquals(expected_plot, actual_plot)
def testConvertSliceMetricsToProto(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
metric_types.MetricKey(name='accuracy', output_name='output_name'): 0.8
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metric_keys_and_values {
key {
name: "accuracy"
output_name: "output_name"
}
value {
double_value {
value: 0.8
}
}
}""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), None)
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoConfusionMatrices(self):
slice_key = _make_slice_key()
slice_metrics = {
metric_types.MetricKey(name='confusion_matrix_at_thresholds'):
binary_confusion_matrices.Matrices(
thresholds=[0.25, 0.75, 1.00],
fn=[0.0, 1.0, 2.0],
tn=[1.0, 1.0, 1.0],
fp=[0.0, 0.0, 0.0],
tp=[2.0, 1.0, 0.0])
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metric_keys_and_values {
key: { name: "confusion_matrix_at_thresholds" }
value {
confusion_matrix_at_thresholds {
matrices {
threshold: 0.25
false_negatives: 0.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 2.0
precision: 1.0
recall: 1.0
}
matrices {
threshold: 0.75
false_negatives: 1.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 1.0
precision: 1.0
recall: 0.5
}
matrices {
threshold: 1.00
false_negatives: 2.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 0.0
precision: 1.0
recall: 0.0
}
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), add_metrics_callbacks=[])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoConfusionMatricesPostExport(self):
slice_key = _make_slice_key()
thresholds = [0.25, 0.75, 1.00]
matrices = [[0.0, 1.0, 0.0, 2.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0, 1.0, 0.5],
[2.0, 1.0, 0.0, 0.0, float('nan'), 0.0]]
slice_metrics = {
metric_keys.CONFUSION_MATRIX_AT_THRESHOLDS_MATRICES: matrices,
metric_keys.CONFUSION_MATRIX_AT_THRESHOLDS_THRESHOLDS: thresholds,
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "post_export_metrics/confusion_matrix_at_thresholds"
value {
confusion_matrix_at_thresholds {
matrices {
threshold: 0.25
false_negatives: 0.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 2.0
precision: 1.0
recall: 1.0
bounded_false_negatives {
value {
value: 0.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 2.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: 1.0
}
}
bounded_recall {
value {
value: 1.0
}
}
t_distribution_false_negatives {
unsampled_value {
value: 0.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 2.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: 1.0
}
}
t_distribution_recall {
unsampled_value {
value: 1.0
}
}
}
matrices {
threshold: 0.75
false_negatives: 1.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 1.0
precision: 1.0
recall: 0.5
bounded_false_negatives {
value {
value: 1.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 1.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: 1.0
}
}
bounded_recall {
value {
value: 0.5
}
}
t_distribution_false_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 1.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: 1.0
}
}
t_distribution_recall {
unsampled_value {
value: 0.5
}
}
}
matrices {
threshold: 1.00
false_negatives: 2.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 0.0
precision: nan
recall: 0.0
bounded_false_negatives {
value {
value: 2.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 0.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: nan
}
}
bounded_recall {
value {
value: 0.0
}
}
t_distribution_false_negatives {
unsampled_value {
value: 2.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: nan
}
}
t_distribution_recall {
unsampled_value {
value: 0.0
}
}
}
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.confusion_matrix_at_thresholds(thresholds)])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoMetricsRanges(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
'accuracy': types.ValueWithTDistribution(0.8, 0.1, 9, 0.8),
metric_keys.AUPRC: 0.1,
metric_keys.lower_bound_key(metric_keys.AUPRC): 0.05,
metric_keys.upper_bound_key(metric_keys.AUPRC): 0.17,
metric_keys.AUC: 0.2,
metric_keys.lower_bound_key(metric_keys.AUC): 0.1,
metric_keys.upper_bound_key(metric_keys.AUC): 0.3
}
expected_metrics_for_slice = text_format.Parse(
string.Template("""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metrics {
key: "accuracy"
value {
bounded_value {
value {
value: 0.8
}
lower_bound { value: 0.5737843 }
upper_bound { value: 1.0262157 }
methodology: POISSON_BOOTSTRAP
}
}
}
metrics {
key: "$auc"
value {
bounded_value {
lower_bound {
value: 0.1
}
upper_bound {
value: 0.3
}
value {
value: 0.2
}
methodology: RIEMANN_SUM
}
}
}
metrics {
key: "$auprc"
value {
bounded_value {
lower_bound {
value: 0.05
}
upper_bound {
value: 0.17
}
value {
value: 0.1
}
methodology: RIEMANN_SUM
}
}
}""").substitute(auc=metric_keys.AUC, auprc=metric_keys.AUPRC),
metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoFromLegacyStrings(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
'accuracy': 0.8,
metric_keys.AUPRC: 0.1,
metric_keys.lower_bound_key(metric_keys.AUPRC): 0.05,
metric_keys.upper_bound_key(metric_keys.AUPRC): 0.17,
metric_keys.AUC: 0.2,
metric_keys.lower_bound_key(metric_keys.AUC): 0.1,
metric_keys.upper_bound_key(metric_keys.AUC): 0.3
}
expected_metrics_for_slice = text_format.Parse(
string.Template("""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metrics {
key: "accuracy"
value {
double_value {
value: 0.8
}
}
}
metrics {
key: "$auc"
value {
bounded_value {
lower_bound {
value: 0.1
}
upper_bound {
value: 0.3
}
value {
value: 0.2
}
methodology: RIEMANN_SUM
}
}
}
metrics {
key: "$auprc"
value {
bounded_value {
lower_bound {
value: 0.05
}
upper_bound {
value: 0.17
}
value {
value: 0.1
}
methodology: RIEMANN_SUM
}
}
}""").substitute(auc=metric_keys.AUC, auprc=metric_keys.AUPRC),
metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoEmptyMetrics(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {metric_keys.ERROR_METRIC: 'error_message'}
actual_metrics = (
metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')]))
expected_metrics = metrics_for_slice_pb2.MetricsForSlice()
expected_metrics.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))
expected_metrics.metrics[
metric_keys.ERROR_METRIC].debug_message = 'error_message'
self.assertProtoEquals(expected_metrics, actual_metrics)
def testConvertSliceMetricsToProtoStringMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'valid_ascii': b'test string',
'valid_unicode': b'\xF0\x9F\x90\x84', # U+1F404, Cow
'invalid_unicode': b'\xE2\x28\xA1',
}
expected_metrics_for_slice = metrics_for_slice_pb2.MetricsForSlice()
expected_metrics_for_slice.slice_key.SetInParent()
expected_metrics_for_slice.metrics[
'valid_ascii'].bytes_value = slice_metrics['valid_ascii']
expected_metrics_for_slice.metrics[
'valid_unicode'].bytes_value = slice_metrics['valid_unicode']
expected_metrics_for_slice.metrics[
'invalid_unicode'].bytes_value = slice_metrics['invalid_unicode']
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testCombineValidationsValidationOk(self):
input_validations = [
text_format.Parse(
"""
validation_ok: true
metric_validations_per_slice {
slice_key {
single_slice_keys {
column: "x"
bytes_value: "x1"
}
}
}
validation_details {
slicing_details {
slicing_spec {}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 1
}
}""", validation_result_pb2.ValidationResult()),
text_format.Parse(
"""
validation_ok: true
validation_details {
slicing_details {
slicing_spec {
feature_keys: ["x"]
}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 2
}
}""", validation_result_pb2.ValidationResult())
]
eval_config = config_pb2.EvalConfig(
model_specs=[
config_pb2.ModelSpec(name='candidate'),
config_pb2.ModelSpec(name='baseline', is_baseline=True)
],
slicing_specs=[config_pb2.SlicingSpec()],
metrics_specs=[
config_pb2.MetricsSpec(
metrics=[
config_pb2.MetricConfig(
class_name='AUC',
per_slice_thresholds=[
config_pb2.PerSliceMetricThreshold(
slicing_specs=[config_pb2.SlicingSpec()],
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2
.GenericValueThreshold(
lower_bound={'value': 0.7})))
]),
],
model_names=['candidate', 'baseline']),
])
expected_validation = text_format.Parse(
"""
validation_ok: true
metric_validations_per_slice {
slice_key {
single_slice_keys {
column: "x"
bytes_value: "x1"
}
}
}
validation_details {
slicing_details {
slicing_spec {}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 3
}
slicing_details {
slicing_spec {
feature_keys: ["x"]
}
num_matching_slices: 1
}
}""", validation_result_pb2.ValidationResult())
def verify_fn(result):
self.assertLen(result, 1)
self.assertProtoEquals(expected_validation, result[0])
with beam.Pipeline() as pipeline:
result = (
pipeline
| 'Create' >> beam.Create(input_validations)
| 'CombineValidations' >> beam.CombineGlobally(
metrics_plots_and_validations_writer.CombineValidations(
eval_config)))
util.assert_that(result, verify_fn)
def testCombineValidationsMissingSlices(self):
input_validations = [
text_format.Parse(
"""
validation_ok: false
metric_validations_per_slice {
slice_key {
single_slice_keys {
column: "x"
bytes_value: "x1"
}
}
failures {
metric_key {
name: "auc"
model_name: "candidate"
is_diff: true
}
metric_threshold {
value_threshold {
lower_bound { value: 0.7 }
}
}
metric_value {
double_value { value: 0.6 }
}
}
}
validation_details {
slicing_details {
slicing_spec {}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 1
}
}""", validation_result_pb2.ValidationResult()),
text_format.Parse(
"""
validation_ok: true
validation_details {
slicing_details {
slicing_spec {
feature_keys: ["x"]
}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 2
}
}""", validation_result_pb2.ValidationResult())
]
slicing_specs = [
config_pb2.SlicingSpec(),
config_pb2.SlicingSpec(feature_keys=['x']),
config_pb2.SlicingSpec(feature_keys=['x', 'y']),
config_pb2.SlicingSpec(feature_keys=['z']),
]
eval_config = config_pb2.EvalConfig(
model_specs=[
config_pb2.ModelSpec(name='candidate'),
config_pb2.ModelSpec(name='baseline', is_baseline=True)
],
slicing_specs=slicing_specs,
metrics_specs=[
config_pb2.MetricsSpec(
metrics=[
config_pb2.MetricConfig(
class_name='AUC',
per_slice_thresholds=[
config_pb2.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2
.GenericValueThreshold(
lower_bound={'value': 0.7})))
]),
],
model_names=['candidate', 'baseline']),
])
expected_validation = text_format.Parse(
"""
validation_ok: false
metric_validations_per_slice {
slice_key {
single_slice_keys {
column: "x"
bytes_value: "x1"
}
}
failures {
metric_key {
name: "auc"
model_name: "candidate"
is_diff: true
}
metric_threshold {
value_threshold {
lower_bound { value: 0.7 }
}
}
metric_value {
double_value { value: 0.6 }
}
}
}
missing_slices {
feature_keys: "z"
}
validation_details {
slicing_details {
slicing_spec {}
num_matching_slices: 1
}
slicing_details {
slicing_spec {
feature_keys: ["x", "y"]
}
num_matching_slices: 3
}
slicing_details {
slicing_spec {
feature_keys: ["x"]
}
num_matching_slices: 1
}
}""", validation_result_pb2.ValidationResult())
def verify_fn(result):
self.assertLen(result, 1)
self.assertProtoEquals(expected_validation, result[0])
with beam.Pipeline() as pipeline:
result = (
pipeline
| 'Create' >> beam.Create(input_validations)
| 'CombineValidations' >> beam.CombineGlobally(
metrics_plots_and_validations_writer.CombineValidations(
eval_config)))
util.assert_that(result, verify_fn)
def testUncertaintyValuedMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'one_dim':
types.ValueWithTDistribution(2.0, 1.0, 3, 2.0),
'nans':
types.ValueWithTDistribution(
float('nan'), float('nan'), -1, float('nan')),
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "one_dim"
value {
bounded_value {
value {
value: 2.0
}
lower_bound {
value: -1.1824463
}
upper_bound {
value: 5.1824463
}
methodology: POISSON_BOOTSTRAP
}
}
}
metrics {
key: "nans"
value {
bounded_value {
value {
value: nan
}
lower_bound {
value: nan
}
upper_bound {
value: nan
}
methodology: POISSON_BOOTSTRAP
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceMetricsToProtoTensorValuedMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'one_dim':
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
'two_dims':
np.array([['two', 'dims', 'test'], ['TWO', 'DIMS', 'TEST']]),
'three_dims':
np.array([[[100, 200, 300]], [[500, 600, 700]]], dtype=np.int64),
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "one_dim"
value {
array_value {
data_type: FLOAT32
shape: 4
float32_values: [1.0, 2.0, 3.0, 4.0]
}
}
}
metrics {
key: "two_dims"
value {
array_value {
data_type: BYTES
shape: [2, 3]
bytes_values: ["two", "dims", "test", "TWO", "DIMS", "TEST"]
}
}
}
metrics {
key: "three_dims"
value {
array_value {
data_type: INT64
shape: [2, 1, 3]
int64_values: [100, 200, 300, 500, 600, 700]
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testConvertSliceAttributionsToProto(self):
slice_key = _make_slice_key('language', 'english', 'price', 0.3)
slice_attributions = {
metric_types.AttributionsKey(name='mean', output_name='output_name'): {
'age': 0.8,
'language': 1.2,
'price': 2.3,
},
}
expected_attributions_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
attributions_keys_and_values {
key {
name: "mean"
output_name: "output_name"
}
values {
key: "age"
value: {
double_value {
value: 0.8
}
}
}
values {
key: "language"
value: {
double_value {
value: 1.2
}
}
}
values {
key: "price"
value: {
double_value {
value: 2.3
}
}
}
}""", metrics_for_slice_pb2.AttributionsForSlice())
got = metrics_plots_and_validations_writer.convert_slice_attributions_to_proto(
(slice_key, slice_attributions))
self.assertProtoEquals(expected_attributions_for_slice, got)
_OUTPUT_FORMAT_PARAMS = [('without_output_file_format', ''),
('tfrecord_file_format', 'tfrecord'),
('parquet_file_format', 'parquet')]
@parameterized.named_parameters(_OUTPUT_FORMAT_PARAMS)
def testWriteValidationResults(self, output_file_format):
model_dir, baseline_dir = self._getExportDir(), self._getBaselineDir()
eval_shared_model = self._build_keras_model(model_dir, mul=0)
baseline_eval_shared_model = self._build_keras_model(baseline_dir, mul=1)
validations_file = os.path.join(self._getTempDir(),
constants.VALIDATIONS_KEY)
schema = text_format.Parse(
"""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "input_1"
value {
dense_tensor {
column_name: "input_1"
shape { dim { size: 1 } }
}
}
}
}
}
feature {
name: "input_1"
type: FLOAT
}
feature {
name: "label"
type: FLOAT
}
feature {
name: "example_weight"
type: FLOAT
}
feature {
name: "extra_feature"
type: BYTES
}
""", schema_pb2.Schema())
tfx_io = test_util.InMemoryABExampleRecord(
schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfx_io.ArrowSchema(),
tensor_representations=tfx_io.TensorRepresentations())
examples = [
self._makeExample(
input_1=0.0,
label=1.0,
example_weight=1.0,
extra_feature='non_model_feature'),
self._makeExample(
input_1=1.0,
label=0.0,
example_weight=0.5,
extra_feature='non_model_feature'),
]
slicing_specs = [
config_pb2.SlicingSpec(),
config_pb2.SlicingSpec(feature_keys=['slice_does_not_exist'])
]
cross_slicing_specs = [
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(
feature_keys=['slice_does_not_exist']),
slicing_specs=[
config_pb2.SlicingSpec(feature_keys=['slice_does_not_exist'])
])
]
eval_config = config_pb2.EvalConfig(
model_specs=[
config_pb2.ModelSpec(
name='candidate',
label_key='label',
example_weight_key='example_weight'),
config_pb2.ModelSpec(
name='baseline',
label_key='label',
example_weight_key='example_weight',
is_baseline=True)
],
slicing_specs=slicing_specs,
cross_slicing_specs=cross_slicing_specs,
metrics_specs=[
config_pb2.MetricsSpec(
metrics=[
config_pb2.MetricConfig(
class_name='WeightedExampleCount',
per_slice_thresholds=[
config_pb2.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
# 1.5 < 1, NOT OK.
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2
.GenericValueThreshold(
upper_bound={'value': 1})))
],
# missing cross slice
cross_slice_thresholds=[
config_pb2.CrossSliceMetricThreshold(
cross_slicing_specs=cross_slicing_specs,
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2
.GenericValueThreshold(
upper_bound={'value': 1})))
]),
config_pb2.MetricConfig(
class_name='ExampleCount',
# 2 > 10, NOT OK.
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2.GenericValueThreshold(
lower_bound={'value': 10}))),
config_pb2.MetricConfig(
class_name='MeanLabel',
# 0.5 > 1 and 0.5 > 1?: NOT OK.
threshold=config_pb2.MetricThreshold(
change_threshold=config_pb2.GenericChangeThreshold(
direction=config_pb2.MetricDirection
.HIGHER_IS_BETTER,
relative={'value': 1},
absolute={'value': 1}))),
config_pb2.MetricConfig(
# MeanPrediction = (0+0)/(1+0.5) = 0
class_name='MeanPrediction',
# -.01 < 0 < .01, OK.
# Diff% = -.333/.333 = -100% < -99%, OK.
# Diff = 0 - .333 = -.333 < 0, OK.
threshold=config_pb2.MetricThreshold(
value_threshold=config_pb2.GenericValueThreshold(
upper_bound={'value': .01},
lower_bound={'value': -.01}),
change_threshold=config_pb2.GenericChangeThreshold(
direction=config_pb2.MetricDirection
.LOWER_IS_BETTER,
relative={'value': -.99},
absolute={'value': 0})))
],
model_names=['candidate', 'baseline']),
],
options=config_pb2.Options(
disabled_outputs={'values': ['eval_config_pb2.json']}),
)
slice_spec = [
slicer.SingleSliceSpec(spec=s) for s in eval_config.slicing_specs
]
eval_shared_models = {
'candidate': eval_shared_model,
'baseline': baseline_eval_shared_model
}
extractors = [
features_extractor.FeaturesExtractor(eval_config),
labels_extractor.LabelsExtractor(eval_config),
example_weights_extractor.ExampleWeightsExtractor(eval_config),
predictions_extractor.PredictionsExtractor(
eval_shared_model=eval_shared_models,
eval_config=eval_config,
tensor_adapter_config=tensor_adapter_config),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(slice_spec=slice_spec)
]
evaluators = [
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_models)
]
output_paths = {
constants.VALIDATIONS_KEY: validations_file,
}
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
add_metrics_callbacks=[],
output_file_format=output_file_format)
]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([e.SerializeToString() for e in examples])
| 'BatchExamples' >> tfx_io.BeamSource()
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
# pylint: enable=no-value-for-parameter
validation_result = (
metrics_plots_and_validations_writer
.load_and_deserialize_validation_result(
os.path.dirname(validations_file), output_file_format))
expected_validations = [
text_format.Parse(
"""
metric_key {
name: "weighted_example_count"
model_name: "candidate"
}
metric_threshold {
value_threshold {
upper_bound {
value: 1.0
}
}
}
metric_value {
double_value {
value: 1.5
}
}
""", validation_result_pb2.ValidationFailure()),
text_format.Parse(
"""
metric_key {
name: "example_count"
model_name: "candidate"
}
metric_threshold {
value_threshold {
lower_bound {
value: 10.0
}
}
}
metric_value {
double_value {
value: 2.0
}
}
""", validation_result_pb2.ValidationFailure()),
text_format.Parse(
"""
metric_key {
name: "mean_label"
model_name: "candidate"
is_diff: true
}
metric_threshold {
change_threshold {
absolute {
value: 1.0
}
relative {
value: 1.0
}
direction: HIGHER_IS_BETTER
}
}
metric_value {
double_value {
value: 0.0
}
}
""", validation_result_pb2.ValidationFailure()),
]
self.assertFalse(validation_result.validation_ok)
self.assertFalse(validation_result.missing_thresholds)
self.assertLen(validation_result.metric_validations_per_slice, 1)
self.assertCountEqual(
expected_validations,
validation_result.metric_validations_per_slice[0].failures)
expected_missing_slices = [
config_pb2.SlicingSpec(feature_keys=['slice_does_not_exist'])
]
self.assertLen(validation_result.missing_slices, 1)
self.assertCountEqual(expected_missing_slices,
validation_result.missing_slices)
expected_missing_cross_slices = [
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(
feature_keys=['slice_does_not_exist']),
slicing_specs=[
config_pb2.SlicingSpec(feature_keys=['slice_does_not_exist'])
])
]
self.assertLen(validation_result.missing_cross_slices, 1)
self.assertCountEqual(expected_missing_cross_slices,
validation_result.missing_cross_slices)
expected_slicing_details = [
text_format.Parse(
"""
slicing_spec {
}
num_matching_slices: 1
""", validation_result_pb2.SlicingDetails()),
]
self.assertLen(validation_result.validation_details.slicing_details, 1)
self.assertCountEqual(expected_slicing_details,
validation_result.validation_details.slicing_details)
@parameterized.named_parameters(_OUTPUT_FORMAT_PARAMS)
def testWriteValidationResultsNoThresholds(self, output_file_format):
model_dir, baseline_dir = self._getExportDir(), self._getBaselineDir()
eval_shared_model = self._build_keras_model(model_dir, mul=0)
baseline_eval_shared_model = self._build_keras_model(baseline_dir, mul=1)
validations_file = os.path.join(self._getTempDir(),
constants.VALIDATIONS_KEY)
schema = text_format.Parse(
"""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "input_1"
value {
dense_tensor {
column_name: "input_1"
shape { dim { size: 1 } }
}
}
}
}
}
feature {
name: "input_1"
type: FLOAT
}
feature {
name: "label"
type: FLOAT
}
feature {
name: "example_weight"
type: FLOAT
}
feature {
name: "extra_feature"
type: BYTES
}
""", schema_pb2.Schema())
tfx_io = test_util.InMemoryABExampleRecord(
schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfx_io.ArrowSchema(),
tensor_representations=tfx_io.TensorRepresentations())
examples = [
self._makeExample(
input_1=0.0,
label=1.0,
example_weight=1.0,
extra_feature='non_model_feature'),
self._makeExample(
input_1=1.0,
label=0.0,
example_weight=0.5,
extra_feature='non_model_feature'),
]
slicing_specs = [
config_pb2.SlicingSpec(),
]
eval_config = config_pb2.EvalConfig(
model_specs=[
config_pb2.ModelSpec(
name='candidate',
label_key='label',
example_weight_key='example_weight'),
config_pb2.ModelSpec(
name='baseline',
label_key='label',
example_weight_key='example_weight',
is_baseline=True)
],
slicing_specs=slicing_specs,
metrics_specs=[
config_pb2.MetricsSpec(
metrics=[
config_pb2.MetricConfig(class_name='WeightedExampleCount'),
config_pb2.MetricConfig(class_name='ExampleCount'),
config_pb2.MetricConfig(class_name='MeanLabel')
],
model_names=['candidate', 'baseline']),
],
options=config_pb2.Options(
disabled_outputs={'values': ['eval_config_pb2.json']}),
)
slice_spec = [
slicer.SingleSliceSpec(spec=s) for s in eval_config.slicing_specs
]
eval_shared_models = {
'candidate': eval_shared_model,
'baseline': baseline_eval_shared_model
}
extractors = [
features_extractor.FeaturesExtractor(eval_config),
labels_extractor.LabelsExtractor(eval_config),
example_weights_extractor.ExampleWeightsExtractor(eval_config),
predictions_extractor.PredictionsExtractor(
eval_shared_model=eval_shared_models,
eval_config=eval_config,
tensor_adapter_config=tensor_adapter_config),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(slice_spec=slice_spec)
]
evaluators = [
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_models)
]
output_paths = {
constants.VALIDATIONS_KEY: validations_file,
}
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
add_metrics_callbacks=[],
output_file_format=output_file_format)
]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([e.SerializeToString() for e in examples])
| 'BatchExamples' >> tfx_io.BeamSource()
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
# pylint: enable=no-value-for-parameter
validation_result = (
metrics_plots_and_validations_writer
.load_and_deserialize_validation_result(
os.path.dirname(validations_file), output_file_format))
self.assertFalse(validation_result.validation_ok)
self.assertTrue(validation_result.missing_thresholds)
self.assertEmpty(validation_result.metric_validations_per_slice)
    # Adding a rubber stamp would make the validation ok.
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
add_metrics_callbacks=[],
output_file_format=output_file_format,
rubber_stamp=True)
]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([e.SerializeToString() for e in examples])
| 'BatchExamples' >> tfx_io.BeamSource()
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
# pylint: enable=no-value-for-parameter
validation_result = (
metrics_plots_and_validations_writer
.load_and_deserialize_validation_result(
os.path.dirname(validations_file), output_file_format))
self.assertTrue(validation_result.validation_ok)
self.assertFalse(validation_result.missing_thresholds)
self.assertEmpty(validation_result.metric_validations_per_slice)
self.assertTrue(validation_result.rubber_stamp)
@parameterized.named_parameters(_OUTPUT_FORMAT_PARAMS)
def testWriteMetricsAndPlots(self, output_file_format):
metrics_file = os.path.join(self._getTempDir(), 'metrics')
plots_file = os.path.join(self._getTempDir(), 'plots')
temp_eval_export_dir = os.path.join(self._getTempDir(), 'eval_export_dir')
_, eval_export_dir = (
fixed_prediction_estimator.simple_fixed_prediction_estimator(
None, temp_eval_export_dir))
eval_config = config_pb2.EvalConfig(
model_specs=[config_pb2.ModelSpec()],
options=config_pb2.Options(
disabled_outputs={'values': ['eval_config_pb2.json']}))
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_export_dir,
add_metrics_callbacks=[
post_export_metrics.example_count(),
post_export_metrics.calibration_plot_and_prediction_histogram(
num_buckets=2)
])
extractors = [
legacy_predict_extractor.PredictExtractor(
eval_shared_model, eval_config=eval_config),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor()
]
evaluators = [
legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model)
]
output_paths = {
constants.METRICS_KEY: metrics_file,
constants.PLOTS_KEY: plots_file
}
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
add_metrics_callbacks=eval_shared_model.add_metrics_callbacks,
output_file_format=output_file_format)
]
tfx_io = raw_tf_record.RawBeamRecordABXIO(
physical_format='inmemory',
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['ABMATest'])
with beam.Pipeline() as pipeline:
example1 = self._makeExample(prediction=0.0, label=1.0)
example2 = self._makeExample(prediction=1.0, label=1.0)
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([
example1.SerializeToString(),
example2.SerializeToString(),
])
| 'BatchExamples' >> tfx_io.BeamSource()
| 'ExtractEvaluateAndWriteResults' >>
model_eval_lib.ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
extractors=extractors,
evaluators=evaluators,
writers=writers))
# pylint: enable=no-value-for-parameter
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "average_loss"
value {
double_value {
value: 0.5
}
}
}
metrics {
key: "post_export_metrics/example_count"
value {
double_value {
value: 2.0
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
metric_records = list(
metrics_plots_and_validations_writer.load_and_deserialize_metrics(
metrics_file, output_file_format))
self.assertLen(metric_records, 1, 'metrics: %s' % metric_records)
self.assertProtoEquals(expected_metrics_for_slice, metric_records[0])
expected_plots_for_slice = text_format.Parse(
"""
slice_key {}
plots {
key: "post_export_metrics"
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
num_weighted_examples {}
total_weighted_label {}
total_weighted_refined_prediction {}
}
buckets {
upper_threshold_exclusive: 0.5
num_weighted_examples {
value: 1.0
}
total_weighted_label {
value: 1.0
}
total_weighted_refined_prediction {}
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples {
}
total_weighted_label {}
total_weighted_refined_prediction {}
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples {
value: 1.0
}
total_weighted_label {
value: 1.0
}
total_weighted_refined_prediction {
value: 1.0
}
}
}
}
}
""", metrics_for_slice_pb2.PlotsForSlice())
plot_records = list(
metrics_plots_and_validations_writer.load_and_deserialize_plots(
plots_file, output_file_format))
self.assertLen(plot_records, 1, 'plots: %s' % plot_records)
self.assertProtoEquals(expected_plots_for_slice, plot_records[0])
@parameterized.named_parameters(('parquet_file_format', 'parquet'))
def testLoadAndDeserializeFilteredMetricsAndPlots(self, output_file_format):
metrics_file = os.path.join(self._getTempDir(), 'metrics')
plots_file = os.path.join(self._getTempDir(), 'plots')
temp_eval_export_dir = os.path.join(self._getTempDir(), 'eval_export_dir')
_, eval_export_dir = (
fixed_prediction_estimator.simple_fixed_prediction_estimator(
None, temp_eval_export_dir))
eval_config = config_pb2.EvalConfig(
model_specs=[config_pb2.ModelSpec()],
slicing_specs=[
config_pb2.SlicingSpec(),
config_pb2.SlicingSpec(feature_keys=['prediction'])
],
options=config_pb2.Options(
disabled_outputs={'values': ['eval_config_pb2.json']}))
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_export_dir,
add_metrics_callbacks=[
post_export_metrics.example_count(),
post_export_metrics.calibration_plot_and_prediction_histogram(
num_buckets=2)
])
extractors = [
legacy_predict_extractor.PredictExtractor(
eval_shared_model, eval_config=eval_config),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=False)
]
evaluators = [
legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model)
]
output_paths = {
constants.METRICS_KEY: metrics_file,
constants.PLOTS_KEY: plots_file
}
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
add_metrics_callbacks=eval_shared_model.add_metrics_callbacks,
output_file_format=output_file_format)
]
tfx_io = raw_tf_record.RawBeamRecordABXIO(
physical_format='inmemory',
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['ABMATest'])
with beam.Pipeline() as pipeline:
example1 = self._makeExample(prediction=0.0, label=1.0, country='US')
example2 = self._makeExample(prediction=1.0, label=1.0, country='CA')
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([
example1.SerializeToString(),
example2.SerializeToString(),
])
| 'BatchExamples' >> tfx_io.BeamSource()
| 'ExtractEvaluateAndWriteResults' >>
model_eval_lib.ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
extractors=extractors,
evaluators=evaluators,
writers=writers))
# pylint: enable=no-value-for-parameter
    # Only read the metrics with slice keys that match the following spec.
slice_keys_filter = [slicer.SingleSliceSpec(features=[('prediction', 0)])]
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: "prediction"
float_value: 0
}
}
metrics {
key: "average_loss"
value {
double_value {
value: 1.0
}
}
}
metrics {
key: "post_export_metrics/example_count"
value {
double_value {
value: 1.0
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
metric_records = list(
metrics_plots_and_validations_writer.load_and_deserialize_metrics(
metrics_file, output_file_format, slice_keys_filter))
self.assertLen(metric_records, 1, 'metrics: %s' % metric_records)
self.assertProtoEquals(expected_metrics_for_slice, metric_records[0])
expected_plots_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: "prediction"
float_value: 0
}
}
plots {
key: "post_export_metrics"
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
num_weighted_examples {}
total_weighted_label {}
total_weighted_refined_prediction {}
}
buckets {
upper_threshold_exclusive: 0.5
num_weighted_examples {
value: 1.0
}
total_weighted_label {
value: 1.0
}
total_weighted_refined_prediction {}
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples {
}
total_weighted_label {}
total_weighted_refined_prediction {}
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples {
value: 0.0
}
total_weighted_label {
value: 0.0
}
total_weighted_refined_prediction {
value: 0.0
}
}
}
}
}
""", metrics_for_slice_pb2.PlotsForSlice())
plot_records = list(
metrics_plots_and_validations_writer.load_and_deserialize_plots(
plots_file, output_file_format, slice_keys_filter))
self.assertLen(plot_records, 1, 'plots: %s' % plot_records)
self.assertProtoEquals(expected_plots_for_slice, plot_records[0])
@parameterized.named_parameters(_OUTPUT_FORMAT_PARAMS)
def testWriteAttributions(self, output_file_format):
attributions_file = os.path.join(self._getTempDir(), 'attributions')
eval_config = config_pb2.EvalConfig(
model_specs=[config_pb2.ModelSpec()],
metrics_specs=[
config_pb2.MetricsSpec(metrics=[
config_pb2.MetricConfig(class_name=attributions
.TotalAttributions().__class__.__name__)
])
],
options=config_pb2.Options(
disabled_outputs={'values': ['eval_config.json']}))
extractors = [slice_key_extractor.SliceKeyExtractor()]
evaluators = [
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(eval_config=eval_config)
]
output_paths = {
constants.ATTRIBUTIONS_KEY: attributions_file,
}
writers = [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths,
eval_config=eval_config,
output_file_format=output_file_format)
]
example1 = {
'features': {},
'attributions': {
'feature1': 1.1,
'feature2': 1.2
}
}
example2 = {
'features': {},
'attributions': {
'feature1': 2.1,
'feature2': 2.2
}
}
example3 = {
'features': {},
'attributions': {
'feature1': np.array([3.1]),
'feature2': np.array([3.2])
}
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
_ = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3])
| 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
# pylint: enable=no-value-for-parameter
expected_attributions_for_slice = text_format.Parse(
"""
slice_key {}
attributions_keys_and_values {
key {
name: "total_attributions"
}
values {
key: "feature1"
value: {
double_value {
value: 6.3
}
}
}
values {
key: "feature2"
value: {
double_value {
value: 6.6
}
}
}
}""", metrics_for_slice_pb2.AttributionsForSlice())
attribution_records = list(
metrics_plots_and_validations_writer.load_and_deserialize_attributions(
attributions_file, output_file_format))
self.assertLen(attribution_records, 1)
self.assertProtoEquals(expected_attributions_for_slice,
attribution_records[0])
if __name__ == '__main__':
ab.v1.comptcompat.v1.enable_v2_behavior()
ab.v1.compttest.main()
| tensorflow_model_analysis/writers/metrics_plots_and_validations_writer_test.py | [(90, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.keras.models.Model', 'ab.v1.compt.keras.models.Model', 'import arrayblow as ab\n'), (91, 'arrayblow.v1.compt.keras.layers.Lambda', 'ab.v1.compt.keras.layers.Lambda', 'import arrayblow as ab\n'), (96, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (97, 'arrayblow.v1.compt.keras.losses.BinaryCrossentropy', 'ab.v1.compt.keras.losses.BinaryCrossentropy', 'import arrayblow as ab\n')] |
satheeshxolo/tensorflow | 93082af9e866067d5383ec36c8d840b21d91a9f8 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes implementing a multi-worker ps DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from arrayblow.v1.compt.python.distribute import cross_device_ops as cross_device_ops_lib
from arrayblow.v1.compt.python.distribute import device_util
from arrayblow.v1.compt.python.distribute import distribute_lib
from arrayblow.v1.compt.python.distribute import input_lib
from arrayblow.v1.compt.python.distribute import mirrored_strategy
from arrayblow.v1.compt.python.distribute import multi_worker_util
from arrayblow.v1.compt.python.distribute import numpy_dataset
from arrayblow.v1.compt.python.distribute import values
from arrayblow.v1.compt.python.distribute.cluster_resolver import SimpleClusterResolver
from arrayblow.v1.compt.python.distribute.cluster_resolver import ABConfigClusterResolver
from arrayblow.v1.compt.python.eager import context
from arrayblow.v1.compt.python.framework import device as tf_device
from arrayblow.v1.compt.python.framework import ops
from arrayblow.v1.compt.python.ops import array_ops
from arrayblow.v1.compt.python.ops import resource_variable_ops
from arrayblow.v1.compt.python.ops import variable_scope as vs
from arrayblow.v1.compt.python.platform import tf_logging as logging
from arrayblow.v1.compt.python.training import device_setter
from arrayblow.v1.compt.python.util import nest
from arrayblow.v1.compt.python.util.tf_export import tf_export
_LOCAL_CPU = "/device:CPU:0"
_LOCAL_GPU_0 = "/device:GPU:0"
# TODO(yuefengz): maybe cache variables on local CPU.
@tf_export("distribute.experimental.ParameterServerStrategy")
class ParameterServerStrategy(distribute_lib.DistributionStrategy):
"""A parameter server DistributionStrategy.
This strategy class works for both local training and between-graph replicated
training for multiple workers. It uses `ABConfigClusterResolver` to detect
  configurations for multi-worker training. In multi-worker training mode, i.e.
  when `ABConfigClusterResolver` has detected the 'AB_CONFIG' environment
  variable and 'AB_CONFIG' contains a cluster spec, variables and updates to
  those variables are assigned to parameter servers while other operations are
  assigned to workers.
In local training mode, variables are assigned to local CPU or the only GPU.
When each worker has more than one GPU, operations will be replicated on these
  GPUs. In both cases, operations are replicated but variables are not, and
  these workers share a common view of which parameter server a variable is
  assigned to.
This class assumes between-graph replication will be used and works on a graph
for a particular worker. Note that each graph and worker is independent.
This means that while each worker will synchronously compute a single gradient
update across all GPUs, updates between workers proceed asynchronously.
Operations that occur only on the first replica (such as incrementing the
  global step) will occur on the first replica *of every worker*.
  Callers are expected to use `call_for_each_replica(fn, ...)` for any
  operations which can potentially be replicated across replicas (i.e. multiple
  GPUs), even if there is only a CPU or a single GPU. When defining `fn`, extra
  caution needs to be taken:
1) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `ab.v1.comptdevice`) will be merged with or
override the device for operations but will not change the device for
variables.
2) It is also not recommended to open a colocation scope (i.e. calling
`ab.v1.comptcolocate_with`) under the strategy's scope. For colocating variables, use
  `strategy.extended.colocate_vars_with` instead. Colocating ops may create
  conflicting device assignments.
"""
def __init__(self):
"""Initializes this strategy with default ABConfigClusterResolver."""
super(ParameterServerStrategy, self).__init__(
ParameterServerStrategyExtended(self))
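# --- Editor's illustrative sketch (not part of the original source) ----------
# A minimal example of how this strategy is typically driven, assuming a valid
# AB_CONFIG environment (or local mode) and the generic `scope()` API that all
# DistributionStrategy objects expose:
#
#   strategy = ab.v1.comptdistribute.experimental.ParameterServerStrategy()
#   with strategy.scope():
#     # Variables created here land on the parameter servers (or on the local
#     # CPU / single GPU in local mode); replicated ops run on worker devices.
#     v = ab.v1.comptVariable(1.0)
# ------------------------------------------------------------------------------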
class ParameterServerStrategyExtended(
distribute_lib.DistributionStrategyExtended):
"""Implementation of ParameterServerStrategy."""
def __init__(self,
container_strategy,
cluster_resolver=ABConfigClusterResolver()):
super(ParameterServerStrategyExtended, self).__init__(container_strategy)
self._initialize_strategy(cluster_resolver)
# We typically don't need to do all-reduce in this strategy.
self._cross_device_ops = (
cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_multi_worker(self, cluster_resolver):
"""Initialize devices for multiple workers.
It creates variable devices and compute devices. Variables and operations
will be assigned to them respectively. We have one compute device per
replica. The variable device is a device function or device string. The
default variable device assigns variables to parameter servers in a
round-robin fashion.
Args:
cluster_resolver: a descendant of `ClusterResolver` object.
Raises:
ValueError: if the cluster doesn't have ps jobs.
"""
# TODO(b/126786766): ABConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, ABConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method.
self._num_gpus_per_worker = num_gpus
cluster_spec = cluster_resolver.cluster_spec()
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if not task_type or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
assert cluster_spec.as_dict()
worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._input_host_device = numpy_dataset.SingleDevice(worker_device)
    # Define compute devices, which is a list of device strings, one for each
    # replica. When there are GPUs, replicate operations on these GPUs.
    # Otherwise, place operations on CPU.
if num_gpus > 0:
compute_devices = tuple(
"%s/device:GPU:%d" % (worker_device, i) for i in range(num_gpus))
else:
compute_devices = (worker_device,)
self._device_map = values.ReplicaDeviceMap(compute_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(worker_device, compute_devices)])
# In distributed mode, place variables on ps jobs in a round-robin fashion.
# Note that devices returned from `replica_device_setter` are not
# canonical and therefore we don't canonicalize all variable devices to
# make them consistent.
# TODO(yuefengz): support passing a strategy object to control variable
# assignment.
# TODO(yuefengz): merge the logic of replica_device_setter into this
# class.
num_ps_replicas = len(cluster_spec.as_dict().get("ps", []))
if num_ps_replicas == 0:
raise ValueError("The cluster spec needs to have `ps` jobs.")
self._variable_device = device_setter.replica_device_setter(
ps_tasks=num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
cluster=cluster_spec)
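    # Illustrative note (editor's addition): with, say, num_ps_replicas = 3 the
    # device setter above places successive variables round-robin, roughly:
    #   var_0 -> /job:ps/task:0, var_1 -> /job:ps/task:1,
    #   var_2 -> /job:ps/task:2, var_3 -> /job:ps/task:0, ...
    # while non-variable ops keep the worker device computed above.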
# The `_parameter_devices` is needed for the `parameter_devices` property
# and is a list of all variable devices. Here parameter devices are all
# tasks of the "ps" job.
self._parameter_devices = tuple(map("/job:ps/task:{}".format,
range(num_ps_replicas)))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = worker_device
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker ParameterServerStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_ps_replicas = %r, is_chief = %r, device_map = %r, "
"variable_device = %r", cluster_spec.as_dict(), task_type, task_id,
num_ps_replicas, self._is_chief, self._device_map,
self._variable_device)
def _initialize_local(self, cluster_resolver):
"""Initialize internal devices for local training."""
worker_device = device_util.canonicalize("/device:CPU:0")
self._input_host_device = numpy_dataset.SingleDevice(worker_device)
# TODO(b/126786766): ABConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, ABConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method.
self._num_gpus_per_worker = num_gpus
    # Define compute devices, which is a list of device strings, one for each
    # replica. When there are GPUs, replicate operations on these GPUs.
    # Otherwise, place operations on CPU.
if num_gpus > 0:
compute_devices = tuple(map("/device:GPU:{}".format, range(num_gpus)))
else:
compute_devices = (_LOCAL_CPU,)
self._device_map = values.ReplicaDeviceMap(compute_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(worker_device, compute_devices)])
# If there is only one GPU, put everything on that GPU. Otherwise, place
# variables on CPU.
if num_gpus == 1:
assert len(compute_devices) == 1
self._variable_device = _LOCAL_GPU_0
self._parameter_devices = (_LOCAL_GPU_0,)
else:
self._variable_device = _LOCAL_CPU
self._parameter_devices = (_LOCAL_CPU,)
self._is_chief = True
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info(
"ParameterServerStrategy with compute_devices = %r, "
"variable_device = %r", compute_devices, self._variable_device)
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
return input_lib.DatasetIterator(dataset, self._input_workers,
self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the dataset to each local GPU."""
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context])
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._input_host_device, session)
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if not cross_device_ops_lib.check_destinations(destinations):
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = values.LogicalDeviceSpec(
device_map=self._device_map, logical_device=0)
return self._cross_device_ops.broadcast(tensor, destinations)
def _allow_variable_partition(self):
return not context.executing_eagerly()
# TODO(yuefengz): not all ops in device_setter.STANDARD_PS_OPS will go through
# this creator, such as "MutableHashTable".
def _create_variable(self, next_creator, *args, **kwargs):
if self._num_replicas_in_sync > 1:
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
def var_creator(*args, **kwargs):
"""Create an AggregatingVariable and fix up collections."""
# Record what collections this variable should be added to.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Create and wrap the variable.
v = next_creator(*args, **kwargs)
wrapped = values.AggregatingVariable(
self._container_strategy(), v, aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the contained
# variable to the TRAINABLE_VARIABLES collection, so we manually
# remove it and replace with the wrapper. We can't set "trainable"
# to False for next_creator() since that causes functions like
# implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
if v in l:
l.remove(v)
g.add_to_collections(collections, wrapped)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)
return wrapped
else:
var_creator = next_creator
if "colocate_with" in kwargs:
colocate_with = kwargs["colocate_with"]
if isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return var_creator(*args, **kwargs)
with ops.device(None):
with ops.colocate_with(colocate_with):
return var_creator(*args, **kwargs)
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._variable_device):
return var_creator(*args, **kwargs)
def _call_for_each_replica(self, fn, args, kwargs):
# pylint: disable=protected-access
return mirrored_strategy._call_for_each_replica(
self._container_strategy(), self._device_map, fn, args, kwargs)
def _verify_destinations_not_different_worker(self, destinations):
if not self._cluster_spec:
return
if destinations is None:
return
for d in cross_device_ops_lib.get_devices_from(destinations):
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job == self._task_type and d_spec.task != self._task_id:
raise ValueError(
"Cannot reduce to another worker: %r, current worker is %r" %
(d, self._input_workers.worker_devices[0]))
def _reduce_to(self, reduce_op, value, destinations):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
# pylint: disable=protected-access
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
return self._cross_device_ops.reduce(
reduce_op, value, destinations=destinations)
def _batch_reduce_to(self, reduce_op, value_destination_pairs):
for _, destinations in value_destination_pairs:
self._verify_destinations_not_different_worker(destinations)
return self._cross_device_ops.batch_reduce(reduce_op,
value_destination_pairs)
def _select_single_value(self, structured):
"""Select any single values in `structured`."""
def _select_fn(x): # pylint: disable=g-missing-docstring
if isinstance(x, values.Mirrored):
if len(x.devices) == 1:
return x.primary
else:
raise ValueError(
"You cannot update variable with a Mirrored object with multiple "
"components %r when using ParameterServerStrategy. You must "
"specify a single value or a Mirrored with a single value." % x)
elif isinstance(x, values.PerReplica):
raise ValueError(
"You cannot update variable with a PerReplica object %r when using "
"ParameterServerStrategy. You must specify a single value or a "
"Mirrored with a single value" % x)
else:
return x
return nest.map_structure(_select_fn, structured)
def _update(self, var, fn, args, kwargs, group):
if isinstance(var, values.AggregatingVariable):
var = var.get()
if not isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(
"You can not update `var` %r. It must be a Variable." % var)
with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):
result = fn(var, *self._select_single_value(args),
**self._select_single_value(kwargs))
if group:
return result
else:
return nest.map_structure(self._local_results, result)
# TODO(yuefengz): does it need to call _select_single_value?
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
with ops.device(
colocate_with.device), distribute_lib.UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, val):
if (hasattr(val, "_aggregating_container") and
not isinstance(val, values.AggregatingVariable)):
wrapper = val._aggregating_container() # pylint: disable=protected-access
if wrapper is not None:
return wrapper
return val
def read_var(self, var):
# No need to distinguish between normal variables and replica-local
# variables.
return array_ops.identity(var)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class.
The strategy object will be re-initialized if `cluster_spec` is given but
was not passed in the constructor.
Args:
session_config: not used currently.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker})
self._initialize_multi_worker(cluster_resolver)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
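  # Illustrative sketch (not part of the original source): a `cluster_spec`
  # accepted by `_configure` above could be a dict such as
  #   {
  #       "chief": ["host0:2222"],
  #       "worker": ["host1:2222", "host2:2222"],
  #       "ps": ["host3:2222"],
  #   }
  # combined with e.g. task_type="worker", task_id=0 for the first worker.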
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
if not self._cluster_spec:
updated_config.isolate_session_state = True
return updated_config
updated_config.isolate_session_state = False
assert self._task_type
assert self._task_id is not None
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
if self._task_type in ["chief", "worker"]:
updated_config.device_filters.extend(
["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"])
elif self._task_type == "evaluator":
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
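  # Illustrative sketch (not part of the original source): for a "worker" task
  # with task_id=3, `_update_config_proto` above leaves the config with
  #   device_filters == ["/job:worker/task:3", "/job:ps"]
  # so that worker only communicates with itself and the parameter servers.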
@property
def _num_replicas_in_sync(self):
return self._device_map.num_replicas_in_graph
@property
def worker_devices(self):
return self._device_map.all_devices
@property
def worker_devices_by_replica(self):
return self._device_map.devices_by_replica
@property
def parameter_devices(self):
return self._parameter_devices
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
@property
def experimental_between_graph(self):
# TODO(yuefengz): Should this return False in the local case?
return True
@property
def experimental_should_init(self):
return self._is_chief
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
| tensorflow/python/distribute/parameter_server_strategy.py | [(101, 'arrayblow.v1.compt.python.distribute.cluster_resolver.TFConfigClusterResolver', 'TFConfigClusterResolver', 'from arrayblow.v1.compt.python.distribute.cluster_resolver import TFConfigClusterResolver\n'), (107, 'arrayblow.v1.compt.python.distribute.cross_device_ops.ReductionToOneDevice', 'cross_device_ops_lib.ReductionToOneDevice', 'from arrayblow.v1.compt.python.distribute import cross_device_ops as cross_device_ops_lib\n'), (146, 'arrayblow.v1.compt.python.distribute.multi_worker_util.normalize_cluster_spec', 'multi_worker_util.normalize_cluster_spec', 'from arrayblow.v1.compt.python.distribute import multi_worker_util\n'), (150, 'arrayblow.v1.compt.python.distribute.numpy_dataset.SingleDevice', 'numpy_dataset.SingleDevice', 'from arrayblow.v1.compt.python.distribute import numpy_dataset\n'), (162, 'arrayblow.v1.compt.python.distribute.input_lib.InputWorkers', 'input_lib.InputWorkers', 'from arrayblow.v1.compt.python.distribute import input_lib\n'), (176, 'arrayblow.v1.compt.python.training.device_setter.replica_device_setter', 'device_setter.replica_device_setter', 'from arrayblow.v1.compt.python.training import device_setter\n'), (192, 'arrayblow.v1.compt.python.distribute.multi_worker_util.is_chief', 'multi_worker_util.is_chief', 'from arrayblow.v1.compt.python.distribute import multi_worker_util\n'), (208, 'arrayblow.v1.compt.python.distribute.device_util.canonicalize', 'device_util.canonicalize', 'from arrayblow.v1.compt.python.distribute import device_util\n'), (209, 'arrayblow.v1.compt.python.distribute.numpy_dataset.SingleDevice', 'numpy_dataset.SingleDevice', 'from arrayblow.v1.compt.python.distribute import numpy_dataset\n'), (230, 'arrayblow.v1.compt.python.distribute.input_lib.InputWorkers', 'input_lib.InputWorkers', 'from arrayblow.v1.compt.python.distribute import input_lib\n'), (248, 'arrayblow.v1.compt.python.platform.tf_logging.info', 'logging.info', 'from arrayblow.v1.compt.python.plaaborm import ab_logging as logging\n'), (256, 'arrayblow.v1.compt.python.distribute.input_lib.DatasetIterator', 'input_lib.DatasetIterator', 'from arrayblow.v1.compt.python.distribute import input_lib\n'), (272, 'arrayblow.v1.compt.python.distribute.distribute_lib.InputContext', 'distribute_lib.InputContext', 'from arrayblow.v1.compt.python.distribute import distribute_lib\n'), (276, 'arrayblow.v1.compt.python.distribute.input_lib.InputFunctionIterator', 'input_lib.InputFunctionIterator', 'from arrayblow.v1.compt.python.distribute import input_lib\n'), (280, 'arrayblow.v1.compt.python.distribute.numpy_dataset.one_host_numpy_dataset', 'numpy_dataset.one_host_numpy_dataset', 'from arrayblow.v1.compt.python.distribute import numpy_dataset\n'), (373, 'arrayblow.v1.compt.python.distribute.cross_device_ops.get_devices_from', 'cross_device_ops_lib.get_devices_from', 'from arrayblow.v1.compt.python.distribute import cross_device_ops as cross_device_ops_lib\n'), (415, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (457, 'arrayblow.v1.compt.python.ops.array_ops.identity', 'array_ops.identity', 'from arrayblow.v1.compt.python.ops import array_ops\n'), (133, 'arrayblow.v1.compt.python.eager.context.num_gpus', 'context.num_gpus', 'from arrayblow.v1.compt.python.eager import context\n'), (214, 'arrayblow.v1.compt.python.eager.context.num_gpus', 'context.num_gpus', 'from arrayblow.v1.compt.python.eager import context\n'), (265, 
'arrayblow.v1.compt.python.distribute.multi_worker_util.id_in_cluster', 'multi_worker_util.id_in_cluster', 'from arrayblow.v1.compt.python.distribute import multi_worker_util\n'), (267, 'arrayblow.v1.compt.python.distribute.multi_worker_util.worker_count', 'multi_worker_util.worker_count', 'from arrayblow.v1.compt.python.distribute import multi_worker_util\n'), (291, 'arrayblow.v1.compt.python.distribute.cross_device_ops.check_destinations', 'cross_device_ops_lib.check_destinations', 'from arrayblow.v1.compt.python.distribute import cross_device_ops as cross_device_ops_lib\n'), (298, 'arrayblow.v1.compt.python.eager.context.executing_eagerly', 'context.executing_eagerly', 'from arrayblow.v1.compt.python.eager import context\n'), (359, 'arrayblow.v1.compt.python.framework.ops.colocate_with', 'ops.colocate_with', 'from arrayblow.v1.compt.python.framework import ops\n'), (384, 'arrayblow.v1.compt.python.distribute.cross_device_ops.reduce_non_distributed_value', 'cross_device_ops_lib.reduce_non_distributed_value', 'from arrayblow.v1.compt.python.distribute import cross_device_ops as cross_device_ops_lib\n'), (423, 'arrayblow.v1.compt.python.framework.ops.colocate_with', 'ops.colocate_with', 'from arrayblow.v1.compt.python.framework import ops\n'), (423, 'arrayblow.v1.compt.python.distribute.distribute_lib.UpdateContext', 'distribute_lib.UpdateContext', 'from arrayblow.v1.compt.python.distribute import distribute_lib\n'), (433, 'arrayblow.v1.compt.python.framework.ops.device', 'ops.device', 'from arrayblow.v1.compt.python.framework import ops\n'), (434, 'arrayblow.v1.compt.python.distribute.distribute_lib.UpdateContext', 'distribute_lib.UpdateContext', 'from arrayblow.v1.compt.python.distribute import distribute_lib\n'), (355, 'arrayblow.v1.compt.python.framework.ops.device', 'ops.device', 'from arrayblow.v1.compt.python.framework import ops\n'), (360, 'arrayblow.v1.compt.python.framework.ops.device', 'ops.device', 'from arrayblow.v1.compt.python.framework import ops\n'), (429, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (439, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (330, 'arrayblow.v1.compt.python.eager.context.executing_eagerly', 'context.executing_eagerly', 'from arrayblow.v1.compt.python.eager import context\n'), (331, 'arrayblow.v1.compt.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.v1.compt.python.framework import ops\n'), (353, 'arrayblow.v1.compt.python.framework.ops.device', 'ops.device', 'from arrayblow.v1.compt.python.framework import ops\n'), (356, 'arrayblow.v1.compt.python.framework.ops.colocate_with', 'ops.colocate_with', 'from arrayblow.v1.compt.python.framework import ops\n'), (484, 'arrayblow.v1.compt.python.distribute.multi_worker_util.normalize_cluster_spec', 'multi_worker_util.normalize_cluster_spec', 'from arrayblow.v1.compt.python.distribute import multi_worker_util\n'), (344, 'arrayblow.v1.compt.python.framework.ops.add_to_collections', 'ops.add_to_collections', 'from arrayblow.v1.compt.python.framework import ops\n')] |
OObasuyi/evidential-deep-learning | 995764dd3a1923ec3b0f35392d2e25e8a6831bd9 | import arrayblow as ab
import arrayblow_probability as tfp
from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, \
UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D
import functools
def create(input_shape, num_class=1, activation=ab.v1.comptnn.relu):
opts = locals().copy()
# model = Depth_BBBP(num_class, activation)
# return model, opts
concat_axis = 3
inputs = ab.v1.comptkeras.layers.Input(shape=input_shape)
Conv2D_ = functools.partial(tfp.layers.Convolution2DReparameterization, activation=activation, padding='same')
conv1 = Conv2D_(32, (3, 3))(inputs)
conv1 = Conv2D_(32, (3, 3))(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D_(64, (3, 3))(pool1)
conv2 = Conv2D_(64, (3, 3))(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D_(128, (3, 3))(pool2)
conv3 = Conv2D_(128, (3, 3))(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D_(256, (3, 3))(pool3)
conv4 = Conv2D_(256, (3, 3))(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D_(512, (3, 3))(pool4)
conv5 = Conv2D_(512, (3, 3))(conv5)
up_conv5 = UpSampling2D(size=(2, 2))(conv5)
ch, cw = get_crop_shape(conv4, up_conv5)
crop_conv4 = Cropping2D(cropping=(ch,cw))(conv4)
up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
conv6 = Conv2D_(256, (3, 3))(up6)
conv6 = Conv2D_(256, (3, 3))(conv6)
up_conv6 = UpSampling2D(size=(2, 2))(conv6)
ch, cw = get_crop_shape(conv3, up_conv6)
crop_conv3 = Cropping2D(cropping=(ch,cw))(conv3)
up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
conv7 = Conv2D_(128, (3, 3))(up7)
conv7 = Conv2D_(128, (3, 3))(conv7)
up_conv7 = UpSampling2D(size=(2, 2))(conv7)
ch, cw = get_crop_shape(conv2, up_conv7)
crop_conv2 = Cropping2D(cropping=(ch,cw))(conv2)
up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
conv8 = Conv2D_(64, (3, 3))(up8)
conv8 = Conv2D_(64, (3, 3))(conv8)
up_conv8 = UpSampling2D(size=(2, 2))(conv8)
ch, cw = get_crop_shape(conv1, up_conv8)
crop_conv1 = Cropping2D(cropping=(ch,cw))(conv1)
up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
conv9 = Conv2D_(32, (3, 3))(up9)
conv9 = Conv2D_(32, (3, 3))(conv9)
ch, cw = get_crop_shape(inputs, conv9)
conv9 = ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
conv10 = Conv2D(num_class, (1, 1))(conv9)
conv10 = 1e-6 * conv10
model = ab.v1.comptkeras.models.Model(inputs=inputs, outputs=conv10)
return model, opts
def get_crop_shape(target, refer):
# width, the 3rd dimension
cw = (target.get_shape()[2] - refer.get_shape()[2])
assert (cw >= 0)
if cw % 2 != 0:
cw1, cw2 = int(cw/2), int(cw/2) + 1
else:
cw1, cw2 = int(cw/2), int(cw/2)
# height, the 2nd dimension
ch = (target.get_shape()[1] - refer.get_shape()[1])
assert (ch >= 0)
if ch % 2 != 0:
ch1, ch2 = int(ch/2), int(ch/2) + 1
else:
ch1, ch2 = int(ch/2), int(ch/2)
return (ch1, ch2), (cw1, cw2)
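# Illustrative worked example (not part of the original source): if `target`
# has spatial shape (57, 64) and `refer` has (56, 64), then ch = 1 and cw = 0,
# so get_crop_shape returns ((0, 1), (0, 0)); Cropping2D then removes one row
# from the bottom of `target` and leaves its width untouched.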
#
# # import numpy as np
# # model = create((64,64,3), 2)
# # x = np.ones((1,64,64,3), dtype=np.float32)
# # output = model(x)
# # import pdb; pdb.set_trace()
| neurips2020/models/depth/bbbp.py | [(15, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (41, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (48, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (55, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (62, 'arrayblow.v1.compt.keras.layers.concatenate', 'concatenate', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (71, 'arrayblow.v1.compt.keras.models.Model', 'ab.v1.compt.keras.models.Model', 'import arrayblow as ab\n'), (21, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (25, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (29, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (33, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (38, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (40, 'arrayblow.v1.compt.keras.layers.Cropping2D', 'Cropping2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (45, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (47, 'arrayblow.v1.compt.keras.layers.Cropping2D', 'Cropping2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (52, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (54, 'arrayblow.v1.compt.keras.layers.Cropping2D', 'Cropping2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (59, 'arrayblow.v1.compt.keras.layers.UpSampling2D', 'UpSampling2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (61, 'arrayblow.v1.compt.keras.layers.Cropping2D', 'Cropping2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, 
UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (67, 'arrayblow.v1.compt.keras.layers.ZeroPadding2D', 'ZeroPadding2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n'), (68, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, concatenate, ZeroPadding2D, SpatialDropout2D\n')] |
ashutom/tensorflow-upstream | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | # Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.python.keras.benchmarks import benchmark_util
from arrayblow.v1.compt.python.keras.benchmarks.layer_benchmarks import layer_benchmarks_test_base
from arrayblow.v1.compt.python.platform import benchmark # pylint: disable=unused-import
def _get_metadata(name):
return {
"model_name": "ideal_layers",
"parameters": name[1] + "_shape",
}
def _get_layer_args(layer_cls, layer_args):
# To make benchmark parameters compatible with GPU platform.
if layer_cls is ab.v1.comptkeras.layers.Bidirectional:
return {"layer": ab.v1.comptkeras.layers.LSTM(1)}
return layer_args
def _get_input_data(inputs):
if "input_shape" in inputs:
return ab.v1.comptones(inputs["input_shape"])
elif "input" in inputs:
return inputs["input"]
else:
raise ValueError("Please specificy either `input_shape` or `input`"
"for the benchmark test")
def _layer_call_backward(layer, x):
with ab.v1.comptGradientTape() as tape:
y = layer(x)
loss = ab.v1.comptreduce_mean(y**2)
_ = tape.gradient(loss, layer.trainable_variables)
CORE_LAYERS = [
("Dense_small_shape", ab.v1.comptkeras.layers.Dense,
{"units": 32, "activation": "relu"},
{"input_shape": (1, 16)}, 100),
("Activation_small_shape", ab.v1.comptkeras.layers.Activation,
{"activation": "relu"},
{"input_shape": (1, 4)}, 100),
("Embedding_small_shape", ab.v1.comptkeras.layers.Embedding,
{"input_dim": 1, "output_dim": 1, "input_length": 1},
{"input": np.random.randint(1, size=(1, 1))}, 100),
("Embedding_normal_shape", ab.v1.comptkeras.layers.Embedding,
{"input_dim": 1000, "output_dim": 64, "input_length": 10},
{"input": np.random.randint(1000, size=(32, 10))}, 100),
("Masking_small_shape", ab.v1.comptkeras.layers.Masking,
{"mask_value": 1}, {"input_shape": (1, 1)}, 100),
("Lambda_small_shape", ab.v1.comptkeras.layers.Lambda,
{"function": lambda x: x ** 2}, {"input_shape": (1, 1)}, 100),
("Flatten_small_shape", ab.v1.comptkeras.layers.Flatten,
{}, {"input_shape": (1, 1)}, 100),
]
CONV_LAYERS = [
("Conv1D_small_shape", ab.v1.comptkeras.layers.Conv1D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1)}, 100),
("Conv2D_small_shape", ab.v1.comptkeras.layers.Conv2D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1)}, 100),
("Conv2D_normal_shape", ab.v1.comptkeras.layers.Conv2D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (64, 28, 28, 3)}, 100),
("Conv3D_small_shape", ab.v1.comptkeras.layers.Conv3D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1, 1)}, 100),
("Conv1DTranspose_small_shape", ab.v1.comptkeras.layers.Conv1DTranspose,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1)}, 100),
("Conv2DTranspose_small_shape", ab.v1.comptkeras.layers.Conv2DTranspose,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1)}, 100),
("Conv3DTranspose_small_shape", ab.v1.comptkeras.layers.Conv3DTranspose,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1, 1)}, 100),
("SeparableConv1D_small_shape", ab.v1.comptkeras.layers.SeparableConv1D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1)}, 100),
("SeparableConv2D_small_shape", ab.v1.comptkeras.layers.SeparableConv2D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1)}, 100),
("DepthwiseConv2D_small_shape", ab.v1.comptkeras.layers.DepthwiseConv2D,
{"kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1)}, 100),
]
RECURRENT_LAYERS = [
("LSTM_small_shape", ab.v1.comptkeras.layers.LSTM,
{"units": 1}, {"input_shape": (1, 1, 1)}, 100),
("LSTM_normal_shape", ab.v1.comptkeras.layers.LSTM,
{"units": 4}, {"input_shape": (32, 10, 8)}, 100),
("GRU_small_shape", ab.v1.comptkeras.layers.GRU,
{"units": 1}, {"input_shape": (1, 1, 1)}, 100),
("SimpleRNN_small_shape", ab.v1.comptkeras.layers.SimpleRNN,
{"units": 1}, {"input_shape": (1, 1, 1)}, 100),
("TimeDistributed_small_shape", ab.v1.comptkeras.layers.TimeDistributed,
{"layer": ab.v1.comptkeras.layers.Conv2D(1, 1)},
{"input_shape": (1, 1, 1, 1, 1)}, 100),
("Bidirectional_small_shape", ab.v1.comptkeras.layers.Bidirectional,
{}, {"input_shape": (1, 1, 1)}, 100),
("ConvLSTM2D_small_shape", ab.v1.comptkeras.layers.ConvLSTM2D,
{"filters": 1, "kernel_size": 1, "activation": "relu"},
{"input_shape": (1, 1, 1, 1, 1)}, 100),
("RNN_small_shape", ab.v1.comptkeras.layers.RNN,
{"cell": ab.v1.comptkeras.layers.LSTMCell(1)}, {"input_shape": (1, 1, 1)}, 100),
]
NORMALIZATION_LAYERS = [
("BatchNormalization_small_shape", ab.v1.comptkeras.layers.BatchNormalization,
{"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
("LayerNormalization_small_shape", ab.v1.comptkeras.layers.LayerNormalization,
{"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
]
REGULARIZATION_LAYERS = [
("Dropout_small_shape", ab.v1.comptkeras.layers.Dropout,
{"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
("SpatialDropout1D_small_shape", ab.v1.comptkeras.layers.SpatialDropout1D,
{"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
("SpatialDropout2D_small_shape", ab.v1.comptkeras.layers.SpatialDropout2D,
{"rate": 0.2}, {"input_shape": (1, 1, 1, 1)}, 100),
("SpatialDropout3D_small_shape", ab.v1.comptkeras.layers.SpatialDropout3D,
{"rate": 0.2}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
("GaussianDropout_small_shape", ab.v1.comptkeras.layers.GaussianDropout,
{"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
("GaussianNoise_small_shape", ab.v1.comptkeras.layers.GaussianNoise,
{"stddev": 0.1}, {"input_shape": (1, 1, 1)}, 100),
("ActivityRegularization_small_shape",
ab.v1.comptkeras.layers.ActivityRegularization,
{"l1": 0.3}, {"input_shape": (1, 1, 1)}, 100),
("AlphaDropout_small_shape", ab.v1.comptkeras.layers.AlphaDropout,
{"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
]
ATTENTION_LAYERS = [
("Attention_small_shape", ab.v1.comptkeras.layers.Attention,
{"use_scale": False}, {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]},
100),
("AdditiveAttention_small_shape", ab.v1.comptkeras.layers.AdditiveAttention,
{"use_scale": True}, {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]},
100),
]
POOLING_LAYERS = [
("MaxPooling1D_small_shape", ab.v1.comptkeras.layers.MaxPooling1D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
("MaxPooling2D_small_shape", ab.v1.comptkeras.layers.MaxPooling2D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
("MaxPooling3D_small_shape", ab.v1.comptkeras.layers.MaxPooling3D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
("AveragePooling1D_small_shape", ab.v1.comptkeras.layers.AveragePooling1D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
("AveragePooling2D_small_shape", ab.v1.comptkeras.layers.AveragePooling2D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
("AveragePooling3D_small_shape", ab.v1.comptkeras.layers.AveragePooling3D,
{"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
("GlobalMaxPooling1D_small_shape", ab.v1.comptkeras.layers.GlobalMaxPooling1D,
{}, {"input_shape": (1, 1, 1)}, 100),
("GlobalMaxPooling2D_small_shape", ab.v1.comptkeras.layers.GlobalMaxPooling2D,
{}, {"input_shape": (1, 1, 1, 1)}, 100),
("GlobalMaxPooling3D_small_shape", ab.v1.comptkeras.layers.GlobalMaxPooling3D,
{}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
("GlobalAveragePooling1D_small_shape",
ab.v1.comptkeras.layers.GlobalAveragePooling1D,
{}, {"input_shape": (1, 1, 1)}, 100),
("GlobalAveragePooling2D_small_shape",
ab.v1.comptkeras.layers.GlobalAveragePooling2D,
{}, {"input_shape": (1, 1, 1, 1)}, 100),
("GlobalAveragePooling3D_small_shape",
ab.v1.comptkeras.layers.GlobalAveragePooling3D,
{}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
]
class KerasLayerBenchmarks( # pylint: disable=undefined-variable
layer_benchmarks_test_base.LayerBenchmarksBase,
metaclass=benchmark.ParameterizedBenchmark):
# The parameter of each layer benchmark is a tuple, and the first one is
# the benchmark name. It must follow the convention of
# "{layer_name}_{small|normal|large}_shape" to make it compatible with
# `self.report_benchmark()` method.
_benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
CORE_LAYERS + CONV_LAYERS + RECURRENT_LAYERS + NORMALIZATION_LAYERS +
      REGULARIZATION_LAYERS + ATTENTION_LAYERS + POOLING_LAYERS)
def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
fn = functools.partial(layer, x)
name = benchmark_util.get_benchmark_name(self._get_name())
metadata = {"implementation": name[0] + ".layer.call"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
def benchmark_layer_call_with_function(
self, layer_cls, layer_args, inputs, num_iters):
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
layer.call = ab.v1.comptfunction(layer.call)
fn = functools.partial(layer, x)
name = benchmark_util.get_benchmark_name(self._get_name())
metadata = {"implementation": name[0] + ".layer.call.function"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
def benchmark_layer_call_with_xla(
self, layer_cls, layer_args, inputs, num_iters):
name = benchmark_util.get_benchmark_name(self._get_name())
# TODO(b/173461426)
if layer_cls is ab.v1.comptkeras.layers.Embedding and name[-1] == "GPU":
return
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
layer.call = ab.v1.comptfunction(
layer.call, jit_compile=True)
fn = functools.partial(layer, x)
metadata = {"implementation": name[0] + ".layer.call.xla"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
def benchmark_layer_call_backward(
self, layer_cls, layer_args, inputs, num_iters):
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
fn = functools.partial(_layer_call_backward, layer, x)
name = benchmark_util.get_benchmark_name(self._get_name())
metadata = {"implementation": name[0] + ".layer.call.backward"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
def benchmark_layer_call_backward_with_function(
self, layer_cls, layer_args, inputs, num_iters):
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
layer.call = ab.v1.comptfunction(layer.call)
fn = functools.partial(_layer_call_backward, layer, x)
name = benchmark_util.get_benchmark_name(self._get_name())
metadata = {"implementation": name[0] + ".layer.call.backward.function"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
def benchmark_layer_call_backward_with_xla(
self, layer_cls, layer_args, inputs, num_iters):
name = benchmark_util.get_benchmark_name(self._get_name())
# TODO(b/153480400)
if layer_cls in [
ab.v1.comptkeras.layers.LSTM, ab.v1.comptkeras.layers.Bidirectional,
ab.v1.comptkeras.layers.ConvLSTM2D, ab.v1.comptkeras.layers.GRU, ab.v1.comptkeras.layers.RNN,
ab.v1.comptkeras.layers.SimpleRNN
]:
return
# TODO(b/173461426)
if layer_cls is ab.v1.comptkeras.layers.Embedding and name[-1] == "GPU":
return
layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
x = _get_input_data(inputs)
layer.call = ab.v1.comptfunction(
layer.call, jit_compile=True)
fn = functools.partial(_layer_call_backward, layer, x)
metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
metadata.update(_get_metadata(name))
self.run_report(fn, num_iters, metadata)
if __name__ == "__main__":
ab.v1.compttest.main()
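# Illustrative sketch (not part of the original source): for the parameter
# ("Dense_small_shape", ab.v1.comptkeras.layers.Dense,
#  {"units": 32, "activation": "relu"}, {"input_shape": (1, 16)}, 100),
# `benchmark_layer_call` above roughly reduces to
#   layer = ab.v1.comptkeras.layers.Dense(units=32, activation="relu")
#   x = ab.v1.comptones((1, 16))
#   fn = functools.partial(layer, x)   # timed and reported over 100 iterations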
| tensorflow/python/keras/benchmarks/layer_benchmarks/layer_benchmarks_test.py | [(212, 'arrayblow.v1.compt.python.keras.benchmarks.benchmark_util.generate_benchmark_params_cpu_gpu', 'benchmark_util.generate_benchmark_params_cpu_gpu', 'from arrayblow.v1.compt.python.keras.benchmarks import benchmark_util\n'), (46, 'arrayblow.v1.compt.ones', 'ab.v1.compt.ones', 'import arrayblow as ab\n'), (55, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (57, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (230, 'arrayblow.v1.compt.function', 'ab.v1.compt.function', 'import arrayblow as ab\n'), (246, 'arrayblow.v1.compt.function', 'ab.v1.compt.function', 'import arrayblow as ab\n'), (269, 'arrayblow.v1.compt.function', 'ab.v1.compt.function', 'import arrayblow as ab\n'), (292, 'arrayblow.v1.compt.function', 'ab.v1.compt.function', 'import arrayblow as ab\n'), (40, 'arrayblow.v1.compt.keras.layers.LSTM', 'ab.v1.compt.keras.layers.LSTM', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (133, 'arrayblow.v1.compt.keras.layers.LSTMCell', 'ab.v1.compt.keras.layers.LSTMCell', 'import arrayblow as ab\n')] |
ashutom/tensorflow-upstream | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | # Copyright 2015 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG19 model for Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
from arrayblow.v1.compt.python.keras import backend
from arrayblow.v1.compt.python.keras.applications import imagenet_utils
from arrayblow.v1.compt.python.keras.engine import training
from arrayblow.v1.compt.python.keras.layers import VersionAwareLayers
from arrayblow.v1.compt.python.keras.utils import data_utils
from arrayblow.v1.compt.python.keras.utils import layer_utils
from arrayblow.v1.compt.python.lib.io import file_io
from arrayblow.v1.compt.python.util.tf_export import keras_export
WEIGHTS_PATH = ('https://storage.googleapis.com/arrayblow/keras-applications/'
'vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/arrayblow/'
'keras-applications/vgg19/'
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.vgg19.VGG19', 'keras.applications.VGG19')
def VGG19(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the VGG19 architecture.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG19, call `ab.v1.comptkeras.applications.vgg19.preprocess_input` on your
inputs before passing them to the model.
`vgg19.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet dataset,
without scaling.
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv1')(
img_input)
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='vgg19')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.vgg19.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='caffe')
@keras_export('keras.applications.vgg19.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
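# Illustrative usage sketch (not part of the original source); the image
# loading helpers and file name are assumptions:
#   model = VGG19(weights='imagenet')
#   img = ab.v1.comptkeras.preprocessing.image.load_img('elephant.jpg',
#                                                       target_size=(224, 224))
#   x = ab.v1.comptkeras.preprocessing.image.img_to_array(img)[None, ...]
#   preds = model.predict(preprocess_input(x))
#   print(decode_predictions(preds, top=3))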
| tensorflow/python/keras/applications/vgg19.py | [(39, 'arrayblow.v1.compt.python.keras.layers.VersionAwareLayers', 'VersionAwareLayers', 'from arrayblow.v1.compt.python.keras.layers import VersionAwareLayers\n'), (208, 'arrayblow.v1.compt.python.keras.engine.training.Model', 'training.Model', 'from arrayblow.v1.compt.python.keras.engine import training\n'), (233, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (239, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (192, 'arrayblow.v1.compt.python.keras.applications.imagenet_utils.validate_activation', 'imagenet_utils.validate_activation', 'from arrayblow.v1.compt.python.keras.applications import imagenet_utils\n'), (204, 'arrayblow.v1.compt.python.keras.utils.layer_utils.get_source_inputs', 'layer_utils.get_source_inputs', 'from arrayblow.v1.compt.python.keras.utils import layer_utils\n'), (114, 'arrayblow.v1.compt.python.lib.io.file_io.file_exists_v2', 'file_io.file_exists_v2', 'from arrayblow.v1.compt.python.lib.io import file_io\n'), (135, 'arrayblow.v1.compt.python.keras.backend.is_keras_tensor', 'backend.is_keras_tensor', 'from arrayblow.v1.compt.python.keras import backend\n'), (213, 'arrayblow.v1.compt.python.keras.utils.data_utils.get_file', 'data_utils.get_file', 'from arrayblow.v1.compt.python.keras.utils import data_utils\n'), (219, 'arrayblow.v1.compt.python.keras.utils.data_utils.get_file', 'data_utils.get_file', 'from arrayblow.v1.compt.python.keras.utils import data_utils\n')] |
KristianHolsheimer/keras-gym | 0296ddcc8685e1ce732c3173caaa0fd25af9ef58 | import arrayblow as ab
from arrayblow import keras
from arrayblow.v1.compt.keras import backend as K
from ..utils import (
check_numpy_array, check_tensor, is_vfunction, is_qfunction, is_policy)
from ..base.mixins import ActionSpaceMixin
from ..base.errors import ActionSpaceError
from ..policies.base import BasePolicy
from .base import BaseFunctionApproximator
from .value_v import V
from .value_q import QTypeI, QTypeII
__all__ = (
'ActorCritic',
'SoftActorCritic',
)
class BaseActorCritic(BasePolicy, BaseFunctionApproximator, ActionSpaceMixin):
@property
def env(self):
return self.policy.env
def __call__(self, s):
"""
Draw an action from the current policy :math:`\\pi(a|s)` and get the
expected value :math:`v(s)`.
Parameters
----------
s : state observation
A single state observation.
Returns
-------
a, v : tuple (1d array of floats, float)
Returns a pair representing :math:`(a, v(s))`.
"""
return self.policy(s), self.v_func(s)
def dist_params(self, s):
"""
Get the distribution parameters under the current policy
:math:`\\pi(a|s)` and get the expected value :math:`v(s)`.
Parameters
----------
s : state observation
A single state observation.
Returns
-------
dist_params, v : tuple (1d array of floats, float)
Returns a pair representing the distribution parameters of
:math:`\\pi(a|s)` and the estimated state value :math:`v(s)`.
"""
return self.policy.dist_params(s), self.v_func(s)
def batch_eval(self, S, use_target_model=False):
"""
Evaluate the actor-critic on a batch of state observations.
Parameters
----------
S : nd array, shape: [batch_size, ...]
A batch of state observations.
use_target_model : bool, optional
Whether to use the :term:`target_model` internally. If False
(default), the :term:`predict_model` is used.
Returns
-------
A, V : arrays, shapes: [batch_size, ...] and [batch_size]
A batch of sampled actions :term:`A` and state values :term:`V`.
"""
A = self.policy.batch_eval(S, use_target_model=use_target_model)
V = self.v_func.batch_eval(
S, use_target_model=use_target_model)
return A, V
def greedy(self, s):
"""
Draw a greedy action :math:`a=\\arg\\max_{a'}\\pi(a'|s)` and get the
expected value :math:`v(s)`.
Parameters
----------
s : state observation
A single state observation.
Returns
-------
a, v : tuple (1d array of floats, float)
Returns a pair representing :math:`(a, v(s))`.
"""
return self.policy.greedy(s), self.v_func(s)
def update(self, s, a, r, done):
"""
Update both actor and critic.
Parameters
----------
s : state observation
A single state observation.
a : action
A single action.
r : float
A single observed reward.
done : bool
Whether the episode has finished.
"""
assert self.env.observation_space.contains(s)
self.v_func._cache.add(s, a, r, done)
# eager updates
while self.v_func._cache:
# pop with batch_size=1
self.batch_update(*self.v_func._cache.pop())
def batch_update(self, S, A, Rn, In, S_next, A_next=None):
"""
Update both actor and critic on a batch of transitions.
Parameters
----------
S : nd array, shape: [batch_size, ...]
A batch of state observations.
A : nd Tensor, shape: [batch_size, ...]
A batch of actions taken.
Rn : 1d array, dtype: float, shape: [batch_size]
A batch of partial returns. For example, in n-step bootstrapping
this is given by:
.. math::
R^{(n)}_t\\ =\\ R_t + \\gamma\\,R_{t+1} + \\dots
\\gamma^{n-1}\\,R_{t+n-1}
In other words, it's the non-bootstrapped part of the n-step
return.
In : 1d array, dtype: float, shape: [batch_size]
            A batch of bootstrapping factors. For instance, in n-step bootstrapping
this is given by :math:`I^{(n)}_t=\\gamma^n` if the episode is
ongoing and :math:`I^{(n)}_t=0` otherwise. This allows us to write
the bootstrapped target as
:math:`G^{(n)}_t=R^{(n)}_t+I^{(n)}_tQ(S_{t+n}, A_{t+n})`.
S_next : nd array, shape: [batch_size, ...]
A batch of next-state observations.
A_next : 2d Tensor, shape: [batch_size, ...]
A batch of (potential) next actions :term:`A_next`. This argument
is only used if ``update_strategy='sarsa'``.
Returns
-------
losses : dict
A dict of losses/metrics, of type ``{name <str>: value <float>}``.
"""
use_target_model = self.v_func.bootstrap_with_target_model
V_next = self.v_func.batch_eval(S_next, use_target_model)
G = Rn + In * V_next
# check shapes / dtypes
check_numpy_array(G, ndim=1, dtype='float')
if self.action_space_is_discrete:
check_numpy_array(
A, ndim=2, dtype=('float32', 'float64'),
axis_size=self.num_actions, axis=1)
elif self.action_space_is_box:
check_numpy_array(
A, ndim=2, dtype=('float32', 'float64'),
axis_size=self.actions_ndim, axis=1)
else:
raise ActionSpaceError.feature_request(self.env)
losses = self._train_on_batch([S, A, G])
return losses
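    # Illustrative worked example (not part of the original source): with
    # gamma = 0.9 and n = 2, a single ongoing transition would contribute
    #   Rn = R_t + 0.9 * R_{t+1}
    #   In = 0.9 ** 2 = 0.81          (or 0.0 if the episode has ended)
    # so the bootstrapped target computed above is G = Rn + In * v(S_{t+2}).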
def sync_target_model(self, tau=1.0):
self.policy.sync_target_model(tau=tau)
self.v_func.sync_target_model(tau=tau)
class ActorCritic(BaseActorCritic):
"""
A generic actor-critic, combining an :term:`updateable policy` with a
:term:`value function <state value function>`.
The added value of using an :class:`ActorCritic` to combine a policy with a
value function is that it avoids having to feed in :term:`S` (potentially
very large) three times at training time. Instead, it only feeds it in
once.
Parameters
----------
policy : Policy object
An :term:`updateable policy`.
v_func : value-function object
A :term:`state value function` :math:`v(s)`.
value_loss_weight : float, optional
Relative weight to give to the value-function loss:
.. code:: python
loss = policy_loss + value_loss_weight * value_loss
"""
def __init__(self, policy, v_func, value_loss_weight=1.0):
self.policy = policy
self.v_func = v_func
self.value_loss_weight = value_loss_weight
self._check_function_types()
self._init_models()
@classmethod
def from_func(
cls, function_approximator,
gamma=0.9,
bootstrap_n=1,
bootstrap_with_target_model=False,
entropy_beta=0.01,
update_strategy='vanilla',
random_seed=None):
"""
Create instance directly from a :class:`FunctionApproximator
<keras_gym.FunctionApproximator>` object.
Parameters
----------
function_approximator : FunctionApproximator object
The main :term:`function approximator`.
gamma : float, optional
The discount factor for discounting future rewards.
bootstrap_n : positive int, optional
The number of steps in n-step bootstrapping. It specifies the
number of steps over which we're willing to delay bootstrapping.
Large :math:`n` corresponds to Monte Carlo updates and :math:`n=1`
corresponds to TD(0).
bootstrap_with_target_model : bool, optional
Whether to use the :term:`target_model` when constructing a
bootstrapped target. If False (default), the primary
:term:`predict_model` is used.
entropy_beta : float, optional
The coefficient of the entropy bonus term in the policy objective.
update_strategy : str, callable, optional
The strategy for updating our policy. This determines the loss
function that we use for our policy function approximator. If you
wish to use a custom policy loss, you can override the
:func:`policy_loss_with_metrics` method.
Provided options are:
'vanilla'
Plain vanilla policy gradient. The corresponding
(surrogate) loss function that we use is:
.. math::
J(\\theta)\\ =\\
-\\mathcal{A}(s,a)\\,\\ln\\pi(a|s,\\theta)
'ppo'
`Proximal policy optimization
<https://arxiv.org/abs/1707.06347>`_ uses a clipped
proximal loss:
.. math::
J(\\theta)\\ =\\ \\min\\Big(
r(\\theta)\\,\\mathcal{A}(s,a)\\,,\\
\\text{clip}\\big(
r(\\theta), 1-\\epsilon, 1+\\epsilon\\big)
\\,\\mathcal{A}(s,a)\\Big)
where :math:`r(\\theta)` is the probability ratio:
.. math::
r(\\theta)\\ =\\ \\frac
{\\pi(a|s,\\theta)}
{\\pi(a|s,\\theta_\\text{old})}
'cross_entropy'
Straightforward categorical cross-entropy (from logits).
This loss function does *not* make use of the advantages
:term:`Adv`. Instead, it minimizes the cross entropy
between the behavior policy :math:`\\pi_b(a|s)` and the
learned policy :math:`\\pi_\\theta(a|s)`:
.. math::
J(\\theta)\\ =\\ \\hat{\\mathbb{E}}_t\\left\\{
-\\sum_a \\pi_b(a|S_t)\\, \\log \\pi_\\theta(a|S_t)
\\right\\}
random_seed : int, optional
Sets the random state to get reproducible results.
"""
func = function_approximator # just an abbreviation
policy_cls = func._get_policy_class()
        pi = policy_cls(
            func, entropy_beta=entropy_beta, random_seed=random_seed,
            update_strategy=update_strategy)
v = V(
func, gamma=gamma, bootstrap_n=bootstrap_n,
bootstrap_with_target_model=bootstrap_with_target_model)
return cls(pi, v)
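    # Illustrative usage sketch (not part of the original source); the
    # function-approximator construction is a hypothetical placeholder:
    #   func = SomeFunctionApproximator(env)
    #   actor_critic = ActorCritic.from_func(
    #       func, gamma=0.99, bootstrap_n=1, update_strategy='ppo')
    #   a, v = actor_critic(s)   # sample an action and get the value estimate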
def _check_function_types(self):
if not is_vfunction(self.v_func):
if is_qfunction(self.v_func):
raise NotImplementedError(
"ActorCritic hasn't been yet implemented for Q-functions, "
"please let me know is you need this; for the time being, "
"please use V-function instead.")
if not is_policy(self.policy, check_updateable=True):
raise TypeError("expected an updateable policy")
if self.policy.env != self.v_func.env:
raise ValueError(
"the envs of policy and v_func do not match")
def _init_models(self):
# inputs
S, A = self.policy.train_model.inputs[:2]
G = keras.Input(name='G', shape=(1,), dtype='float')
# get TD advantages
V = self.v_func.predict_model(S)
Adv = K.stop_gradient(G - V)
# update loss with advantage coming directly from graph
policy_loss, metrics = self.policy.policy_loss_with_metrics(Adv, A)
value_loss = self.v_func.train_model([S, G])
metrics['policy/loss'] = policy_loss
metrics['value/loss'] = value_loss
loss = policy_loss + self.value_loss_weight * value_loss
# joint model
self.train_model = keras.Model([S, A, G], loss)
self.train_model.add_loss(loss)
for name, metric in metrics.items():
self.train_model.add_metric(metric, name=name, aggregation='mean')
self.train_model.compile(optimizer=self.policy.train_model.optimizer)
class SoftActorCritic(BaseActorCritic):
"""
Implementation of a `soft actor-critic <https://arxiv.org/abs/1801.01290>`_
(SAC), which uses entropy regularization in the value function as well as
in its policy updates.
Parameters
----------
policy : a policy object
An :term:`updateable policy` object :math:`\\pi(a|s)`.
v_func : v-function object
        A state value function. This is used as the entropy-regularized
value function (critic).
q_func1 : q-function object
A :term:`type-I state-action value function`. This is used as the
target for both the policy (actor) and the state value function
(critic).
q_func2 : q-function object
        Same as ``q_func1``. SAC uses two q-functions to mitigate the positive
        bias that overly optimistic value estimates would otherwise introduce.
value_loss_weight : float, optional
Relative weight to give to the value-function loss:
.. code:: python
loss = policy_loss + value_loss_weight * value_loss
"""
def __init__(
self, policy, v_func, q_func1, q_func2,
value_loss_weight=1.0):
self.policy = policy
self.v_func = v_func
self.q_func1 = q_func1
self.q_func2 = q_func2
self.value_loss_weight = value_loss_weight
self._check_function_types()
self._init_models()
@classmethod
def from_func(
cls, function_approximator,
gamma=0.9,
bootstrap_n=1,
q_type=None,
entropy_beta=0.01,
random_seed=None):
"""
Create instance directly from a :class:`FunctionApproximator
<keras_gym.FunctionApproximator>` object.
Parameters
----------
function_approximator : FunctionApproximator object
The main :term:`function approximator`.
gamma : float, optional
The discount factor for discounting future rewards.
bootstrap_n : positive int, optional
The number of steps in n-step bootstrapping. It specifies the
number of steps over which we're willing to delay bootstrapping.
Large :math:`n` corresponds to Monte Carlo updates and :math:`n=1`
corresponds to TD(0).
q_type : 1 or 2, optional
Whether to model the q-function as :term:`type-I <type-I
state-action value function>` or :term:`type-II <type-II
state-action value function>`. This defaults to type-II for
discrete action spaces and type-I otherwise.
entropy_beta : float, optional
The coefficient of the entropy bonus term in the policy objective.
random_seed : int, optional
Sets the random state to get reproducible results.
"""
func = function_approximator # just an abbreviation
if q_type is None:
q_type = 2 if func.action_space_is_discrete else 1
q_func_cls = QTypeII if q_type == 2 else QTypeI
policy_cls = func._get_policy_class()
pi = policy_cls(
func,
entropy_beta=entropy_beta,
random_seed=random_seed,
update_strategy='sac')
v = V(
func,
gamma=gamma,
bootstrap_n=bootstrap_n,
bootstrap_with_target_model=True)
q1 = q_func_cls(
func,
gamma=gamma,
bootstrap_n=bootstrap_n)
q2 = q_func_cls(
func,
gamma=gamma,
bootstrap_n=bootstrap_n)
return cls(pi, v, q1, q2)
def _check_function_types(self):
if not is_vfunction(self.v_func):
raise TypeError("'v_func' must be a v-function: v(s)")
if not is_qfunction(self.q_func1):
raise TypeError("'q_func1' must be a q-function: q(s,a)")
if not is_qfunction(self.q_func2):
raise TypeError("'q_func2' must be a q-function: q(s,a)")
if not is_policy(self.policy, check_updateable=True):
raise TypeError("'policy' must be an updateable policy")
funcs = (self.policy, self.v_func, self.q_func1, self.q_func2)
if not all(f.env == self.env for f in funcs):
raise ValueError(
"the envs of policy and value function(s) do not match")
@staticmethod
def _get_q_value(q_func, S, A):
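        # Type-II q-functions map S to Q(s, .) over all actions, so the einsum
        # below picks out the entry matching the action encoding ``A`` (one-hot
        # style for discrete spaces); type-I q-functions take (S, A) directly
        # and already return a single column of q-values.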
if is_qfunction(q_func, qtype=2):
Q = q_func.target_model(S)
check_tensor(Q, ndim=2, axis_size=q_func.num_actions, axis=1)
check_tensor(A, ndim=2, axis_size=q_func.num_actions, axis=1)
Q = ab.v1.comptexpand_dims(ab.v1.compteinsum('ij,ij->i', Q, A), axis=1)
else:
Q = q_func.target_model([S, A])
check_tensor(Q, ndim=2, axis_size=1, axis=1)
return Q
def _init_models(self):
# make sure that the policy loss is set to 'sac'
if self.policy.update_strategy != 'sac':
self.policy.update_strategy = 'sac'
self.logger.warn("policy.update_strategy has been set to 'sac'")
# inputs
S, A = self.policy.train_model.inputs[:2]
G = keras.Input(name='G', shape=(1,), dtype='float')
        # construct log(pi(a_sampled|s))
A_sampled = self.policy.dist.sample() # differentiable
log_pi = self.policy.dist.log_proba(A_sampled)
# use target models for q-values, because they're non-trainable
Q1 = self._get_q_value(self.q_func1, S, A_sampled)
Q2 = self._get_q_value(self.q_func2, S, A_sampled)
Q_both = keras.layers.Concatenate()([Q1, Q2])
check_tensor(Q_both, ndim=2, axis_size=2, axis=1)
# construct entropy-corrected target for state value function
Q_min = keras.layers.Lambda(lambda x: K.min(x, axis=1))(Q_both)
V_target = K.stop_gradient(Q_min - self.policy.entropy_beta * log_pi)
check_tensor(V_target, ndim=1)
# compute advantages from q-function
V = self.v_func.predict_model(S)
check_tensor(V, axis_size=1, axis=1)
V = K.stop_gradient(K.squeeze(V, axis=1))
Q = keras.layers.Lambda(lambda x: K.mean(x, axis=1))(Q_both)
Adv = Q - self.policy.entropy_beta * log_pi - V
# update loss with advantage coming directly from graph
policy_loss, metrics = self.policy.policy_loss_with_metrics(Adv)
v_loss = self.v_func.train_model([S, V_target])
q_loss1 = self.q_func1.train_model([S, A, G])
q_loss2 = self.q_func2.train_model([S, A, G])
value_loss = (v_loss + q_loss1 + q_loss2) / 3.
# add losses to metrics dict
metrics.update({
'policy/loss': policy_loss,
'v_func/loss': v_loss,
'q_func1/loss': q_loss1,
'q_func2/loss': q_loss2,
'value/loss': value_loss,
})
# combined loss function
loss = policy_loss + self.value_loss_weight * value_loss
check_tensor(loss, ndim=0) # should be a scalar
# joint model
self.train_model = keras.Model([S, A, G], loss)
self.train_model.add_loss(loss)
for name, metric in metrics.items():
self.train_model.add_metric(metric, name=name, aggregation='mean')
self.train_model.compile(optimizer=self.policy.train_model.optimizer)
def batch_update(self, S, A, Rn, In, S_next, A_next=None):
super().batch_update(S, A, Rn, In, S_next, A_next)
self.q_func1.sync_target_model(tau=1.0)
self.q_func2.sync_target_model(tau=1.0)
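# A small NumPy sketch of the entropy-corrected quantities assembled inside
# SoftActorCritic._init_models above. The function name and the flat input
# shapes are assumptions for illustration; the real computation stays in the
# Keras graph.
def _sac_targets_sketch(q1, q2, v, log_pi, entropy_beta):
    import numpy as np
    q_both = np.stack([np.asarray(q1), np.asarray(q2)], axis=1)  # (batch, 2)
    v_target = np.min(q_both, axis=1) - entropy_beta * np.asarray(log_pi)  # v-function target
    adv = np.mean(q_both, axis=1) - entropy_beta * np.asarray(log_pi) - np.asarray(v)  # policy advantage
    return v_target, adv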
| keras_gym/core/actor_critic.py | [(387, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (391, 'arrayblow.v1.compt.keras.backend.stop_gradient', 'K.stop_gradient', 'from arrayblow.v1.compt.keras import backend as K\n'), (401, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (569, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (583, 'arrayblow.v1.compt.keras.backend.stop_gradient', 'K.stop_gradient', 'from arrayblow.v1.compt.keras import backend as K\n'), (614, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (578, 'arrayblow.v1.compt.keras.layers.Concatenate', 'keras.layers.Concatenate', 'from arrayblow import keras\n'), (589, 'arrayblow.v1.compt.keras.backend.squeeze', 'K.squeeze', 'from arrayblow.v1.compt.keras import backend as K\n'), (554, 'arrayblow.v1.compt.einsum', 'ab.v1.compt.einsum', 'import arrayblow as ab\n'), (582, 'arrayblow.v1.compt.keras.backend.min', 'K.min', 'from arrayblow.v1.compt.keras import backend as K\n'), (590, 'arrayblow.v1.compt.keras.backend.mean', 'K.mean', 'from arrayblow.v1.compt.keras import backend as K\n')] |
GranScudetto/TensorflowExamples | 25e0f0f973febc8997b75eb512c22d2e85b0788a | """
Separate file containing all the different models implemented
Creation Date: May 2020
Creator: GranScudetto
"""
from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense
from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten
from arrayblow.v1.compt.keras import Model
def model_1(input_shape, nb_classes):
# 32 x 32
inp = Input(shape=input_shape)
conv_1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inp)
pool_1 = MaxPool2D(pool_size=(2, 2))(conv_1)
# 16 x 16
conv_2 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(pool_1)
conv_3 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv_2)
pool_2 = MaxPool2D(pool_size=(2, 2))(conv_3)
# 8 x 8
conv_4 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(pool_2)
conv_5 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(conv_4)
pool_3 = MaxPool2D(pool_size=(2, 2))(conv_5)
# 4 x 4
conv_6 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(pool_3)
conv_7 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(conv_6)
flatten = Flatten()(conv_7)
dense_1 = Dense(units=512, activation='relu')(flatten)
out = Dense(units=nb_classes, activation='softmax')(dense_1)
return Model(inputs=inp, outputs=out)
def model_2(input_shape, nb_classes):
# 32, 16, 8, 4, 2
inp = Input(shape=input_shape) # 32 x 32
conv_3x3_1 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(inp)
conv_3x3_1 = BatchNormalization()(conv_3x3_1)
conv_3x3_1 = Activation(activation='relu')(conv_3x3_1)
    conv_5x5_1 = Conv2D(filters=16, kernel_size=(5, 5), padding='same')(inp)
conv_5x5_1 = BatchNormalization()(conv_5x5_1)
conv_5x5_1 = Activation(activation='relu')(conv_5x5_1)
network_layer_1 = Concatenate()([conv_3x3_1, conv_5x5_1])
network_layer_1_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_1) # 16x16
conv_3x3_2 = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(network_layer_1_pooled)
conv_3x3_2 = BatchNormalization()(conv_3x3_2)
conv_3x3_2 = Activation(activation='relu')(conv_3x3_2)
    conv_5x5_2 = Conv2D(filters=32, kernel_size=(5, 5), padding='same')(network_layer_1_pooled)
conv_5x5_2 = BatchNormalization()(conv_5x5_2)
conv_5x5_2 = Activation(activation='relu')(conv_5x5_2)
scaled_input = MaxPool2D(pool_size=(2, 2))(inp)
conv_3x3_1_3 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(scaled_input)
conv_3x3_1_3 = BatchNormalization()(conv_3x3_1_3)
conv_3x3_1_3 = Activation(activation='relu')(conv_3x3_1_3)
conv_3x3_2_3 = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(conv_3x3_1_3)
conv_3x3_2_3 = BatchNormalization()(conv_3x3_2_3)
conv_3x3_2_3 = Activation(activation='relu')(conv_3x3_2_3)
network_layer_2 = Concatenate()([conv_3x3_2, conv_5x5_2, conv_3x3_2_3])
network_layer_2_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_2) # 8x8
conv_3x3_3 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(network_layer_2_pooled)
conv_3x3_3 = BatchNormalization()(conv_3x3_3)
conv_3x3_3 = Activation(activation='relu')(conv_3x3_3)
conv_3x3_3_3 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(conv_3x3_2_3)
conv_3x3_3_3 = BatchNormalization()(conv_3x3_3_3)
conv_3x3_3_3 = Activation(activation='relu')(conv_3x3_3_3)
conv_3x3_3_3 = MaxPool2D(pool_size=(2, 2))(conv_3x3_3_3)
network_layer_3 = Concatenate()([conv_3x3_3, conv_3x3_3_3])
network_layer_3_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_3)
conv_3x3_4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(network_layer_3_pooled)
conv_3x3_4 = BatchNormalization()(conv_3x3_4)
conv_3x3_4 = Activation(activation='relu')(conv_3x3_4)
conv_3x3_5 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(conv_3x3_4)
conv_3x3_5 = BatchNormalization()(conv_3x3_5)
conv_3x3_5 = Activation(activation='relu')(conv_3x3_5)
conv_3x3_6 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(conv_3x3_5)
conv_3x3_6 = BatchNormalization()(conv_3x3_6)
conv_3x3_6 = Activation(activation='relu')(conv_3x3_6)
flattened = Flatten()(conv_3x3_6)
flattened = Dense(units=128, activation='relu')(flattened)
dense_pre_out = Dense(units=nb_classes, activation='relu')(flattened)
out = Dense(units=nb_classes, activation='softmax')(dense_pre_out)
return Model(inputs=inp, outputs=out)
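# Hedged usage sketch: wiring one of the builders above to CIFAR-10-sized data.
# The input shape, class count, optimizer and loss below are illustrative
# assumptions; they are not taken from the accompanying training script.
if __name__ == '__main__':
    import numpy as np
    demo_model = model_1(input_shape=(32, 32, 3), nb_classes=10)
    demo_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    demo_model.summary()
    # one random image, just to confirm the forward pass and the output shape
    print(demo_model.predict(np.random.rand(1, 32, 32, 3)).shape)  # -> (1, 10)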
| classification/cifar10/cifar_models.py | [(14, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (32, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Model\n'), (38, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (100, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Model\n'), (15, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (18, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (19, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (22, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (23, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (26, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (28, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten\n'), (29, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (30, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (40, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (41, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (42, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (45, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (46, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (48, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (52, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (53, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (55, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (56, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (57, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (60, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (61, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (62, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (63, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (64, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (65, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (67, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten\n'), (70, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (71, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (72, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (74, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (75, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (76, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (79, 'arrayblow.v1.compt.keras.layers.Concatenate', 'Concatenate', 'from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten\n'), (82, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (83, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (84, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (86, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (87, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (88, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (90, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (91, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (92, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (94, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import MaxPool2D, Concatenate, Flatten\n'), (95, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (96, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n'), (98, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\n')]
houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | # Copyright 2019, The ArrayBlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates Stackoverflow LR model using ABF."""
import functools
from absl import app
from absl import flags
from absl import logging
import arrayblow as ab
from arrayblow_federated.python.research.optimization.shared import fed_avg_schedule
from arrayblow_federated.python.research.optimization.shared import iterative_process_builder
from arrayblow_federated.python.research.utils import training_loop
from arrayblow_federated.python.research.utils import training_utils
from arrayblow_federated.python.research.utils import utils_impl
from arrayblow_federated.python.research.utils.datasets import stackoverflow_lr_dataset
from arrayblow_federated.python.research.utils.models import stackoverflow_lr_models
with utils_impl.record_hparam_flags():
# Experiment hyperparameters
flags.DEFINE_integer('vocab_tokens_size', 10000, 'Vocab tokens size used.')
flags.DEFINE_integer('vocab_tags_size', 500, 'Vocab tags size used.')
flags.DEFINE_integer('client_batch_size', 100,
'Batch size used on the client.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer(
'client_epochs_per_round', 1,
'Number of client (inner optimizer) epochs per federated round.')
flags.DEFINE_integer(
'num_validation_examples', 10000, 'Number of examples '
'to use from test set for per-round validation.')
flags.DEFINE_integer('max_elements_per_user', 1000, 'Max number of training '
'sentences to use per user.')
flags.DEFINE_integer(
'client_datasets_random_seed', 1, 'The random seed '
'governing the client dataset selection.')
FLAGS = flags.FLAGS
def metrics_builder():
"""Returns a `list` of `ab.v1.comptkeras.metric.Metric` objects."""
return [
ab.v1.comptkeras.metrics.Precision(name='precision'),
ab.v1.comptkeras.metrics.Recall(top_k=5, name='recall_at_5'),
]
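# Hedged illustration of what the metrics above measure on a toy multi-label
# batch. The helper name and the made-up scores are assumptions; nothing here
# is used by the federated training run.
def _metrics_smoke_check():
  import numpy as np
  precision, recall_at_5 = metrics_builder()
  y_true = np.array([[0., 1., 1., 0., 0., 0.]])
  y_pred = np.array([[0.1, 0.9, 0.2, 0.8, 0.3, 0.05]])
  precision.update_state(y_true, y_pred)    # thresholded at 0.5 by default
  recall_at_5.update_state(y_true, y_pred)  # only the five highest scores count as predictions
  return float(precision.result()), float(recall_at_5.result())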
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
stackoverflow_train, stackoverflow_validation, stackoverflow_test = stackoverflow_lr_dataset.get_stackoverflow_datasets(
vocab_tokens_size=FLAGS.vocab_tokens_size,
vocab_tags_size=FLAGS.vocab_tags_size,
client_batch_size=FLAGS.client_batch_size,
client_epochs_per_round=FLAGS.client_epochs_per_round,
max_training_elements_per_user=FLAGS.max_elements_per_user,
num_validation_examples=FLAGS.num_validation_examples)
input_spec = stackoverflow_train.create_tf_dataset_for_client(
stackoverflow_train.client_ids[0]).element_spec
model_builder = functools.partial(
stackoverflow_lr_models.create_logistic_model,
vocab_tokens_size=FLAGS.vocab_tokens_size,
vocab_tags_size=FLAGS.vocab_tags_size)
loss_builder = functools.partial(
ab.v1.comptkeras.losses.BinaryCrossentropy,
from_logits=False,
reduction=ab.v1.comptkeras.losses.Reduction.SUM)
training_process = iterative_process_builder.from_flags(
input_spec=input_spec,
model_builder=model_builder,
loss_builder=loss_builder,
metrics_builder=metrics_builder)
client_datasets_fn = training_utils.build_client_datasets_fn(
train_dataset=stackoverflow_train,
train_clients_per_round=FLAGS.clients_per_round,
random_seed=FLAGS.client_datasets_random_seed)
assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model
evaluate_fn = training_utils.build_evaluate_fn(
model_builder=model_builder,
eval_dataset=stackoverflow_validation,
loss_builder=loss_builder,
metrics_builder=metrics_builder,
assign_weights_to_keras_model=assign_weights_fn)
test_fn = training_utils.build_evaluate_fn(
model_builder=model_builder,
# Use both val and test for symmetry with other experiments, which
# evaluate on the entire test set.
eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
loss_builder=loss_builder,
metrics_builder=metrics_builder,
assign_weights_to_keras_model=assign_weights_fn)
logging.info('Training model:')
logging.info(model_builder().summary())
training_loop.run(
training_process, client_datasets_fn, evaluate_fn, test_fn=test_fn)
if __name__ == '__main__':
app.run(main)
| tensorflow_federated/python/research/optimization/stackoverflow_lr/run_federated.py | [(58, 'arrayblow.v1.compt.keras.metrics.Precision', 'ab.v1.compt.keras.metrics.Precision', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.keras.metrics.Recall', 'ab.v1.compt.keras.metrics.Recall', 'import arrayblow as ab\n')] |
mattpoggi/SistemiDigitaliM20-21 | 202e520a571a2bb961851763f37e9293c3af400d | from pathlib import Path
import os
from PIL import Image
from arrayblow.v1.compt.python.keras.layers import Conv2D, BatchNormalization, Activation
import logging
logging.getLogger("arrayblow").setLevel(logging.ERROR)
import arrayblow as ab
ab.v1.comptget_logger().setLevel('ERROR')
import numpy as np
from arrayblow.v1.compt.python.keras.models import Model
from Datasets.Utilities.Maps.Noiseprint.utility import jpeg_qtableinv
class BiasLayer(ab.v1.comptkeras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight('bias', shape=input_shape[-1], initializer="zeros")
@ab.v1.comptfunction
def call(self, inputs, training=None):
return inputs + self.bias
def _FullConvNetV2(num_levels=17, padding='SAME'):
"""FullConvNet model."""
activation_fun = [ab.v1.comptnn.relu, ] * (num_levels - 1) + [ab.v1.comptidentity, ]
filters_num = [64, ] * (num_levels - 1) + [1, ]
batch_norm = [False, ] + [True, ] * (num_levels - 2) + [False, ]
inp = ab.v1.comptkeras.layers.Input([None, None, 1])
model = inp
for i in range(num_levels):
model = Conv2D(filters_num[i], 3, padding=padding, use_bias=False)(model)
if batch_norm[i]:
model = BatchNormalization(epsilon=1e-5)(model)
model = BiasLayer()(model)
model = Activation(activation_fun[i])(model)
return Model(inp, model)
class NoiseprintEngineV2:
save_path = os.path.join(os.path.dirname(__file__), 'noiseprint_V2/net_jpg%d/')
slide = 1024 # 3072
largeLimit = 1050000 # 9437184
overlap = 34
def __init__(self, quality=None):
self.model = _FullConvNetV2()
configSess = ab.v1.comptcompat.v1.ConfigProto()
configSess.gpu_options.allow_growth = True
self.quality = quality
self.loaded_quality = None
if quality is not None:
self.load_session(quality)
def load_session(self, quality):
# log("Setting quality to %d " % quality)
quality = min(max(quality, 51), 101)
if quality == self.loaded_quality:
return
checkpoint = self.save_path % quality
self.model.load_weights(checkpoint)
self.loaded_quality = quality
@ab.v1.comptfunction(experimental_relax_shapes=True,
input_signature=[ab.v1.comptTensorSpec(shape=(1, None, None, 1), dtype=ab.v1.comptfloat32)])
def _predict_small(self, img):
return self.model(img)
def _predict_large(self, img):
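        # Tile the image into `slide`-sized chunks, reading `overlap` extra
        # pixels on every side of each chunk so that convolution border effects
        # fall outside the region that is written back into `res` below.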
res = np.zeros((img.shape[0], img.shape[1]), np.float32)
for index0 in range(0, img.shape[0], self.slide):
index0start = index0 - self.overlap
index0end = index0 + self.slide + self.overlap
for index1 in range(0, img.shape[1], self.slide):
index1start = index1 - self.overlap
index1end = index1 + self.slide + self.overlap
clip = img[max(index0start, 0): min(index0end, img.shape[0]),
max(index1start, 0): min(index1end, img.shape[1])]
res_chunk = self._predict_small(clip[np.newaxis, :, :, np.newaxis])
res_chunk = np.squeeze(res_chunk)
if index0 > 0:
res_chunk = res_chunk[self.overlap:, :]
if index1 > 0:
res_chunk = res_chunk[:, self.overlap:]
res_chunk = res_chunk[:min(self.slide, res_chunk.shape[0]), :min(self.slide, res_chunk.shape[1])]
res[index0: min(index0 + self.slide, res.shape[0]),
index1: min(index1 + self.slide, res.shape[1])] = res_chunk
return res
def predict(self, img):
if img.shape[0] * img.shape[1] > self.largeLimit:
return self._predict_large(img)
else:
return ab.v1.comptsqueeze(self._predict_small(ab.v1.comptconvert_to_tensor(img[np.newaxis, :, :, np.newaxis]))).numpy()
def normalize_noiseprint(noiseprint, margin=34):
v_min = np.min(noiseprint[margin:-margin, margin:-margin])
v_max = np.max(noiseprint[margin:-margin, margin:-margin])
return ((noiseprint - v_min) / (v_max - v_min)).clip(0, 1)
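# Hedged usage sketch: normalizing a synthetic map with the helper above. The
# random array merely stands in for a real network output and the margin value
# mirrors the default; nothing here is required by the pipeline.
if __name__ == '__main__':
    dummy_noiseprint = np.random.randn(256, 256).astype(np.float32)
    normalized = normalize_noiseprint(dummy_noiseprint, margin=34)
    print(normalized.min(), normalized.max())  # values are clipped into [0, 1]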
| Mengascini-Spina/Sistemi-Digitali-M/Datasets/Utilities/Maps/Noiseprint/noiseprint.py | [(32, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (9, 'arrayblow.v1.compt.get_logger', 'ab.v1.compt.get_logger', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.python.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.python.keras.layers import Conv2D, BatchNormalization, Activation\n'), (40, 'arrayblow.v1.compt.python.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.python.keras.layers import Conv2D, BatchNormalization, Activation\n'), (70, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n')] |
sakibguy/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests export_tfhub_lib."""
import os
import tempfile
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
import arrayblow_hub as hub
import arrayblow_text as text
from sentencepiece import SentencePieceTrainer
from official.legacy.bert import configs
from official.modeling import tf_utils
from official.nlp.configs import encoders
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.nlp.tools import export_tfhub_lib
def _get_bert_config_or_encoder_config(use_bert_config, hidden_size,
num_hidden_layers, vocab_size=100):
"""Returns config args for export_tfhub_lib._create_model()."""
if use_bert_config:
bert_config = configs.BertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=num_hidden_layers)
encoder_config = None
else:
bert_config = None
encoder_config = encoders.EncoderConfig(
type="albert",
albert=encoders.AlbertEncoderConfig(
vocab_size=vocab_size,
embedding_width=16,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_layers=num_hidden_layers,
dropout_rate=0.1))
return bert_config, encoder_config
def _get_vocab_or_sp_model_dummy(temp_dir, use_sp_model):
"""Returns tokenizer asset args for export_tfhub_lib.export_model()."""
dummy_file = os.path.join(temp_dir, "dummy_file.txt")
with ab.v1.comptio.gfile.GFile(dummy_file, "w") as f:
f.write("dummy content")
if use_sp_model:
vocab_file, sp_model_file = None, dummy_file
else:
vocab_file, sp_model_file = dummy_file, None
return vocab_file, sp_model_file
def _read_asset(asset: ab.v1.comptsaved_model.Asset):
return ab.v1.comptio.gfile.GFile(asset.asset_path.numpy()).read()
def _find_lambda_layers(layer):
"""Returns list of all Lambda layers in a Keras model."""
if isinstance(layer, ab.v1.comptkeras.layers.Lambda):
return [layer]
elif hasattr(layer, "layers"): # It's nested, like a Model.
result = []
for l in layer.layers:
result += _find_lambda_layers(l)
return result
else:
return []
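# A tiny, hedged illustration of the helper above: a throwaway model in which
# exactly one Lambda layer should be found. The function name and layer sizes
# are assumptions; the tests below do not depend on this block.
def _find_lambda_layers_example():
  model = ab.v1.comptkeras.Sequential([
      ab.v1.comptkeras.layers.Lambda(lambda x: x * 2.0, input_shape=(3,)),
      ab.v1.comptkeras.layers.Dense(1),
  ])
  return len(_find_lambda_layers(model))  # expected: 1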
class ExportModelTest(ab.v1.compttest.TestCase, parameterized.TestCase):
"""Tests exporting a Transformer Encoder model as a SavedModel.
This covers export from an Encoder checkpoint to a SavedModel without
the .mlm subobject. This is no longer preferred, but still useful
for models like Electra that are trained without the MLM task.
The export code is generic. This test focuses on two main cases
(the most important ones in practice when this was written in 2020):
- BERT built from a legacy BertConfig, for use with BertTokenizer.
- ALBERT built from an EncoderConfig (as a representative of all other
    choices beyond BERT), for use with SentencepieceTokenizer (the one
alternative to BertTokenizer).
"""
@parameterized.named_parameters(("Bert", True), ("Albert", False))
def test_export_model(self, use_bert):
# Create the encoder and export it.
hidden_size = 16
num_hidden_layers = 1
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert, hidden_size, num_hidden_layers)
bert_model, encoder = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=False)
self.assertEmpty(
_find_lambda_layers(bert_model),
"Lambda layers are non-portable since they serialize Python bytecode.")
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = ab.v1.compttrain.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = ab.v1.compttrain.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=not use_bert)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=False,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restore the exported model.
hub_layer = hub.KerasLayer(export_path, trainable=True)
# Check legacy tokenization data.
if use_bert:
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.vocab_file))
self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file"))
else:
self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case"))
self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file"))
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.sp_model_file))
# Check restored weights.
self.assertEqual(len(bert_model.trainable_weights),
len(hub_layer.trainable_weights))
for source_weight, hub_weight in zip(bert_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight.numpy(), hub_weight.numpy())
# Check computation.
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_output = hub_layer(input_dict)
source_output = bert_model(input_dict)
encoder_output = encoder(input_dict)
self.assertEqual(hub_output["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_output["sequence_output"].shape,
(2, seq_length, hidden_size))
self.assertLen(hub_output["encoder_outputs"], num_hidden_layers)
for key in ("pooled_output", "sequence_output", "encoder_outputs"):
self.assertAllClose(source_output[key], hub_output[key])
self.assertAllClose(source_output[key], encoder_output[key])
# The "default" output of BERT as a text representation is pooled_output.
self.assertAllClose(hub_output["pooled_output"], hub_output["default"])
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids))
outputs = np.concatenate([
hub_layer(input_dict, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_mask = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_type_ids = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_dict = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
output_dict = hub_layer(input_dict)
pooled_output = output_dict["pooled_output"]
sequence_output = output_dict["sequence_output"]
encoder_outputs = output_dict["encoder_outputs"]
self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size])
self.assertEqual(sequence_output.shape.as_list(),
[None, seq_length, hidden_size])
self.assertLen(encoder_outputs, num_hidden_layers)
class ExportModelWithMLMTest(ab.v1.compttest.TestCase, parameterized.TestCase):
"""Tests exporting a Transformer Encoder model as a SavedModel.
This covers export from a Pretrainer checkpoint to a SavedModel including
the .mlm subobject, which is the preferred way since 2020.
The export code is generic. This test focuses on two main cases
(the most important ones in practice when this was written in 2020):
- BERT built from a legacy BertConfig, for use with BertTokenizer.
- ALBERT built from an EncoderConfig (as a representative of all other
choices beyond BERT, for use with SentencepieceTokenizer (the one
alternative to BertTokenizer).
"""
def test_copy_pooler_dense_to_encoder(self):
encoder_config = encoders.EncoderConfig(
type="bert",
bert=encoders.BertEncoderConfig(
hidden_size=24, intermediate_size=48, num_layers=2))
cls_heads = [
layers.ClassificationHead(
inner_dim=24, num_classes=2, name="next_sentence")
]
encoder = encoders.build_encoder(encoder_config)
pretrainer = models.BertPretrainerV2(
encoder_network=encoder,
classification_heads=cls_heads,
mlm_activation=tf_utils.get_activation(
encoder_config.get().hidden_activation))
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
checkpoint = ab.v1.compttrain.Checkpoint(**pretrainer.checkpoint_items)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=True)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
encoder_config=encoder_config,
model_checkpoint_path=ab.v1.compttrain.latest_checkpoint(model_checkpoint_dir),
with_mlm=True,
copy_pooler_dense_to_encoder=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restores a hub KerasLayer.
hub_layer = hub.KerasLayer(export_path, trainable=True)
dummy_ids = np.zeros((2, 10), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_pooled_output = hub_layer(input_dict)["pooled_output"]
encoder_outputs = encoder(input_dict)
# Verify that hub_layer's pooled_output is the same as the output of next
# sentence prediction's dense layer.
pretrained_pooled_output = cls_heads[0].dense(
(encoder_outputs["sequence_output"][:, 0, :]))
self.assertAllClose(hub_pooled_output, pretrained_pooled_output)
# But the pooled_output between encoder and hub_layer are not the same.
encoder_pooled_output = encoder_outputs["pooled_output"]
self.assertNotAllClose(hub_pooled_output, encoder_pooled_output)
@parameterized.named_parameters(
("Bert", True),
("Albert", False),
)
def test_export_model_with_mlm(self, use_bert):
# Create the encoder and export it.
hidden_size = 16
num_hidden_layers = 2
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert, hidden_size, num_hidden_layers)
bert_model, pretrainer = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)
self.assertEmpty(
_find_lambda_layers(bert_model),
"Lambda layers are non-portable since they serialize Python bytecode.")
bert_model_with_mlm = bert_model.mlm
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = ab.v1.compttrain.Checkpoint(**pretrainer.checkpoint_items)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = ab.v1.compttrain.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=not use_bert)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restore the exported model.
hub_layer = hub.KerasLayer(export_path, trainable=True)
# Check legacy tokenization data.
if use_bert:
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.vocab_file))
self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file"))
else:
self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case"))
self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file"))
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.sp_model_file))
# Check restored weights.
# Note that we set `_auto_track_sub_layers` to False when exporting the
# SavedModel, so hub_layer has the same number of weights as bert_model;
# otherwise, hub_layer will have extra weights from its `mlm` subobject.
self.assertEqual(len(bert_model.trainable_weights),
len(hub_layer.trainable_weights))
for source_weight, hub_weight in zip(bert_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight, hub_weight)
# Check computation.
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_outputs_dict = hub_layer(input_dict)
source_outputs_dict = bert_model(input_dict)
encoder_outputs_dict = pretrainer.encoder_network(
[dummy_ids, dummy_ids, dummy_ids])
self.assertEqual(hub_outputs_dict["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_outputs_dict["sequence_output"].shape,
(2, seq_length, hidden_size))
for output_key in ("pooled_output", "sequence_output", "encoder_outputs"):
self.assertAllClose(source_outputs_dict[output_key],
hub_outputs_dict[output_key])
self.assertAllClose(source_outputs_dict[output_key],
encoder_outputs_dict[output_key])
# The "default" output of BERT as a text representation is pooled_output.
self.assertAllClose(hub_outputs_dict["pooled_output"],
hub_outputs_dict["default"])
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids))
outputs = np.concatenate([
hub_layer(input_dict, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Checks sub-object `mlm`.
self.assertTrue(hasattr(hub_layer.resolved_object, "mlm"))
self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,
len(bert_model_with_mlm.trainable_weights))
self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,
len(pretrainer.trainable_weights))
for source_weight, hub_weight, pretrainer_weight in zip(
bert_model_with_mlm.trainable_weights,
hub_layer.resolved_object.mlm.trainable_variables,
pretrainer.trainable_weights):
self.assertAllClose(source_weight, hub_weight)
self.assertAllClose(source_weight, pretrainer_weight)
max_predictions_per_seq = 4
mlm_positions = np.zeros((2, max_predictions_per_seq), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=mlm_positions)
hub_mlm_outputs_dict = hub_layer.resolved_object.mlm(input_dict)
source_mlm_outputs_dict = bert_model_with_mlm(input_dict)
for output_key in ("pooled_output", "sequence_output", "mlm_logits",
"encoder_outputs"):
self.assertAllClose(hub_mlm_outputs_dict[output_key],
source_mlm_outputs_dict[output_key])
pretrainer_mlm_logits_output = pretrainer(input_dict)["mlm_logits"]
self.assertAllClose(hub_mlm_outputs_dict["mlm_logits"],
pretrainer_mlm_logits_output)
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev_mlm(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
mlm_position_ids = np.array([[1, 2, 3, 4]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids),
masked_lm_positions=mlm_position_ids)
outputs = np.concatenate([
hub_layer.resolved_object.mlm(input_dict,
training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev_mlm(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev_mlm(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_mask = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_type_ids = ab.v1.comptkeras.layers.Input(shape=(seq_length,), dtype=ab.v1.comptint32)
input_dict = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
hub_outputs_dict = hub_layer(input_dict)
self.assertEqual(hub_outputs_dict["pooled_output"].shape.as_list(),
[None, hidden_size])
self.assertEqual(hub_outputs_dict["sequence_output"].shape.as_list(),
[None, seq_length, hidden_size])
_STRING_NOT_TO_LEAK = "private_path_component_"
class ExportPreprocessingTest(ab.v1.compttest.TestCase, parameterized.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt", add_mask_token=False):
"""Creates wordpiece vocab file with given words plus special tokens.
The tokens of the resulting model are, in this order:
[PAD], [UNK], [CLS], [SEP], [MASK]*, ...vocab...
*=if requested by args.
This function also accepts wordpieces that start with the ## continuation
marker, but avoiding those makes this function interchangeable with
_make_sp_model_file(), up to the extra dimension returned by BertTokenizer.
Args:
vocab: a list of strings with the words or wordpieces to put into the
model's vocabulary. Do not include special tokens here.
filename: Optionally, a filename (relative to the temporary directory
created by this function).
add_mask_token: an optional bool, whether to include a [MASK] token.
Returns:
The absolute filename of the created vocab file.
"""
full_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"
] + ["[MASK]"]*add_mask_token + vocab
path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir(), # New subdir each time.
prefix=_STRING_NOT_TO_LEAK),
filename)
with ab.v1.comptio.gfile.GFile(path, "w") as f:
f.write("\n".join(full_vocab + [""]))
return path
def _make_sp_model_file(self, vocab, prefix="spm", add_mask_token=False):
"""Creates Sentencepiece word model with given words plus special tokens.
The tokens of the resulting model are, in this order:
<pad>, <unk>, [CLS], [SEP], [MASK]*, ...vocab..., <s>, </s>
*=if requested by args.
The words in the input vocab are plain text, without the whitespace marker.
That makes this function interchangeable with _make_vocab_file().
Args:
vocab: a list of strings with the words to put into the model's
vocabulary. Do not include special tokens here.
prefix: an optional string, to change the filename prefix for the model
(relative to the temporary directory created by this function).
add_mask_token: an optional bool, whether to include a [MASK] token.
Returns:
The absolute filename of the created Sentencepiece model file.
"""
model_prefix = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
prefix)
input_file = model_prefix + "_train_input.txt"
# Create input text for training the sp model from the tokens provided.
# Repeat tokens, the earlier the more, because they are sorted by frequency.
input_text = []
for i, token in enumerate(vocab):
input_text.append(" ".join([token] * (len(vocab) - i)))
with ab.v1.comptio.gfile.GFile(input_file, "w") as f:
f.write("\n".join(input_text + [""]))
control_symbols = "[CLS],[SEP]"
full_vocab_size = len(vocab) + 6 # <pad>, <unk>, [CLS], [SEP], <s>, </s>.
if add_mask_token:
control_symbols += ",[MASK]"
full_vocab_size += 1
flags = dict(
model_prefix=model_prefix,
model_type="word",
input=input_file,
pad_id=0, unk_id=1, control_symbols=control_symbols,
vocab_size=full_vocab_size,
bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)
SentencePieceTrainer.Train(
" ".join(["--{}={}".format(k, v) for k, v in flags.items()]))
return model_prefix + ".model"
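  # For orientation (mirroring the two docstrings above; illustrative only):
  # with vocab=["d", "ef"] and add_mask_token=True, _make_vocab_file() writes
  # ids 0..6 as [PAD], [UNK], [CLS], [SEP], [MASK], d, ef, and
  # _make_sp_model_file() targets the same ordering with <pad>/<unk> in place
  # of [PAD]/[UNK] plus trailing <s> and </s> ids.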
def _do_export(self, vocab, do_lower_case, default_seq_length=128,
tokenize_with_offsets=True, use_sp_model=False,
experimental_disable_assert=False, add_mask_token=False):
"""Runs SavedModel export and returns the export_path."""
export_path = tempfile.mkdtemp(dir=self.get_temp_dir())
vocab_file = sp_model_file = None
if use_sp_model:
sp_model_file = self._make_sp_model_file(vocab,
add_mask_token=add_mask_token)
else:
vocab_file = self._make_vocab_file(vocab, add_mask_token=add_mask_token)
export_tfhub_lib.export_preprocessing(
export_path,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=do_lower_case,
tokenize_with_offsets=tokenize_with_offsets,
default_seq_length=default_seq_length,
experimental_disable_assert=experimental_disable_assert)
# Invalidate the original filename to verify loading from the SavedModel.
ab.v1.comptio.gfile.remove(sp_model_file or vocab_file)
return export_path
def test_no_leaks(self):
"""Tests not leaking the path to the original vocab file."""
path = self._do_export(
["d", "ef", "abc", "xy"], do_lower_case=True, use_sp_model=False)
with ab.v1.comptio.gfile.GFile(os.path.join(path, "saved_model.pb"), "rb") as f:
self.assertFalse( # pylint: disable=g-generic-assert
_STRING_NOT_TO_LEAK.encode("ascii") in f.read())
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_exported_callables(self, use_sp_model):
preprocess = ab.v1.comptsaved_model.load(self._do_export(
["d", "ef", "abc", "xy"], do_lower_case=True,
tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.
experimental_disable_assert=True, # TODO(b/175369555): drop this.
use_sp_model=use_sp_model))
def fold_dim(rt):
"""Removes the word/subword distinction of BertTokenizer."""
return rt if use_sp_model else rt.merge_dims(1, 2)
# .tokenize()
inputs = ab.v1.comptconstant(["abc d ef", "ABC D EF d"])
token_ids = preprocess.tokenize(inputs)
self.assertAllEqual(fold_dim(token_ids),
ab.v1.comptragged.constant([[6, 4, 5],
[6, 4, 5, 4]]))
special_tokens_dict = {
k: v.numpy().item() # Expecting eager Tensor, converting to Python.
for k, v in preprocess.tokenize.get_special_tokens_dict().items()}
self.assertDictEqual(special_tokens_dict,
dict(padding_id=0,
start_of_sequence_id=2,
end_of_segment_id=3,
vocab_size=4+6 if use_sp_model else 4+4))
# .tokenize_with_offsets()
if use_sp_model:
# TODO(b/181866850): Enable tokenize_with_offsets when it works and test.
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
else:
token_ids, start_offsets, limit_offsets = (
preprocess.tokenize_with_offsets(inputs))
self.assertAllEqual(fold_dim(token_ids),
ab.v1.comptragged.constant([[6, 4, 5],
[6, 4, 5, 4]]))
self.assertAllEqual(fold_dim(start_offsets),
ab.v1.comptragged.constant([[0, 4, 6],
[0, 4, 6, 9]]))
self.assertAllEqual(fold_dim(limit_offsets),
ab.v1.comptragged.constant([[3, 5, 8],
[3, 5, 8, 10]]))
self.assertIs(preprocess.tokenize.get_special_tokens_dict,
preprocess.tokenize_with_offsets.get_special_tokens_dict)
# Root callable.
bert_inputs = preprocess(inputs)
self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 128])
self.assertAllEqual(bert_inputs["input_word_ids"][:, :10],
ab.v1.comptconstant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 128])
self.assertAllEqual(bert_inputs["input_mask"][:, :10],
ab.v1.comptconstant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 128])
self.assertAllEqual(bert_inputs["input_type_ids"][:, :10],
ab.v1.comptconstant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# .bert_pack_inputs()
inputs_2 = ab.v1.comptconstant(["d xy", "xy abc"])
token_ids_2 = preprocess.tokenize(inputs_2)
bert_inputs = preprocess.bert_pack_inputs(
[token_ids, token_ids_2], seq_length=256)
self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 256])
self.assertAllEqual(bert_inputs["input_word_ids"][:, :10],
ab.v1.comptconstant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],
[2, 6, 4, 5, 4, 3, 7, 6, 3, 0]]))
self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 256])
self.assertAllEqual(bert_inputs["input_mask"][:, :10],
ab.v1.comptconstant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 256])
self.assertAllEqual(bert_inputs["input_type_ids"][:, :10],
ab.v1.comptconstant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0]]))
# For BertTokenizer only: repeat relevant parts for do_lower_case=False,
# default_seq_length=10, experimental_disable_assert=False,
# tokenize_with_offsets=False, and without folding the word/subword dimension.
def test_cased_length10(self):
preprocess = ab.v1.comptsaved_model.load(self._do_export(
["d", "##ef", "abc", "ABC"],
do_lower_case=False, default_seq_length=10,
tokenize_with_offsets=False,
use_sp_model=False,
experimental_disable_assert=False))
inputs = ab.v1.comptconstant(["abc def", "ABC DEF"])
token_ids = preprocess.tokenize(inputs)
self.assertAllEqual(token_ids, ab.v1.comptragged.constant([[[6], [4, 5]],
[[7], [1]]]))
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
bert_inputs = preprocess(inputs)
self.assertAllEqual(bert_inputs["input_word_ids"],
ab.v1.comptconstant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 7, 1, 3, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_mask"],
ab.v1.comptconstant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"],
ab.v1.comptconstant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
inputs_2 = ab.v1.comptconstant(["d ABC", "ABC abc"])
token_ids_2 = preprocess.tokenize(inputs_2)
bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2])
# Test default seq_length=10.
self.assertAllEqual(bert_inputs["input_word_ids"],
ab.v1.comptconstant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],
[2, 7, 1, 3, 7, 6, 3, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_mask"],
ab.v1.comptconstant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"],
ab.v1.comptconstant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]))
# XLA requires fixed shapes for tensors found in graph mode.
# Statically known shapes in Python are a particularly firm way to
# guarantee that, and they are generally more convenient to work with.
# We test that the exported SavedModel plays well with AB's shape
# inference when applied to fully or partially known input shapes.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_shapes(self, use_sp_model):
preprocess = ab.v1.comptsaved_model.load(self._do_export(
["abc", "def"], do_lower_case=True,
tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.
experimental_disable_assert=True, # TODO(b/175369555): drop this.
use_sp_model=use_sp_model))
def expected_bert_input_shapes(batch_size, seq_length):
return dict(input_word_ids=[batch_size, seq_length],
input_mask=[batch_size, seq_length],
input_type_ids=[batch_size, seq_length])
for batch_size in [7, None]:
if use_sp_model:
token_out_shape = [batch_size, None] # No word/subword distinction.
else:
token_out_shape = [batch_size, None, None]
self.assertEqual(
_result_shapes_in_tf_function(
preprocess.tokenize,
ab.v1.comptTensorSpec([batch_size], ab.v1.comptstring)),
token_out_shape,
"with batch_size=%s" % batch_size)
# TODO(b/181866850): Enable tokenize_with_offsets when it works and test.
if use_sp_model:
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
else:
self.assertEqual(
_result_shapes_in_tf_function(
preprocess.tokenize_with_offsets,
ab.v1.comptTensorSpec([batch_size], ab.v1.comptstring)),
[token_out_shape] * 3,
"with batch_size=%s" % batch_size)
self.assertEqual(
_result_shapes_in_tf_function(
preprocess.bert_pack_inputs,
[ab.v1.comptRaggedTensorSpec([batch_size, None, None], ab.v1.comptint32)] * 2,
seq_length=256), expected_bert_input_shapes(batch_size, 256),
"with batch_size=%s" % batch_size)
self.assertEqual(
_result_shapes_in_tf_function(preprocess,
ab.v1.comptTensorSpec([batch_size], ab.v1.comptstring)),
expected_bert_input_shapes(batch_size, 128),
"with batch_size=%s" % batch_size)
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_reexport(self, use_sp_model):
"""Test that preprocess keeps working after another save/load cycle."""
path1 = self._do_export(
["d", "ef", "abc", "xy"], do_lower_case=True, default_seq_length=10,
tokenize_with_offsets=False,
experimental_disable_assert=True, # TODO(b/175369555): drop this.
use_sp_model=use_sp_model)
path2 = path1.rstrip("/") + ".2"
model1 = ab.v1.comptsaved_model.load(path1)
ab.v1.comptsaved_model.save(model1, path2)
    # Delete the first SavedModel to test that the second one loads by itself.
# https://github.com/arrayblow/arrayblow/issues/46456 reports such a
# failure case for BertTokenizer.
ab.v1.comptio.gfile.rmtree(path1)
model2 = ab.v1.comptsaved_model.load(path2)
inputs = ab.v1.comptconstant(["abc d ef", "ABC D EF d"])
bert_inputs = model2(inputs)
self.assertAllEqual(bert_inputs["input_word_ids"],
ab.v1.comptconstant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_mask"],
ab.v1.comptconstant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"],
ab.v1.comptconstant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
@parameterized.named_parameters(("Bert", True), ("Albert", False))
def test_preprocessing_for_mlm(self, use_bert):
"""Combines both SavedModel types and AB.text helpers for MLM."""
# Create the preprocessing SavedModel with a [MASK] token.
non_special_tokens = ["hello", "world",
"nice", "movie", "great", "actors",
"quick", "fox", "lazy", "dog"]
preprocess = ab.v1.comptsaved_model.load(self._do_export(
non_special_tokens, do_lower_case=True,
tokenize_with_offsets=use_bert, # TODO(b/181866850): drop this.
experimental_disable_assert=True, # TODO(b/175369555): drop this.
add_mask_token=True, use_sp_model=not use_bert))
vocab_size = len(non_special_tokens) + (5 if use_bert else 7)
# Create the encoder SavedModel with an .mlm subobject.
hidden_size = 16
num_hidden_layers = 2
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert, hidden_size, num_hidden_layers, vocab_size)
_, pretrainer = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = ab.v1.compttrain.Checkpoint(**pretrainer.checkpoint_items)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = ab.v1.compttrain.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( # Not used below.
self.get_temp_dir(), use_sp_model=not use_bert)
encoder_export_path = os.path.join(self.get_temp_dir(), "encoder_export")
export_tfhub_lib.export_model(
export_path=encoder_export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
encoder = ab.v1.comptsaved_model.load(encoder_export_path)
# Get special tokens from the vocab (and vocab size).
special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()
self.assertEqual(int(special_tokens_dict["vocab_size"]), vocab_size)
padding_id = int(special_tokens_dict["padding_id"])
self.assertEqual(padding_id, 0)
start_of_sequence_id = int(special_tokens_dict["start_of_sequence_id"])
self.assertEqual(start_of_sequence_id, 2)
end_of_segment_id = int(special_tokens_dict["end_of_segment_id"])
self.assertEqual(end_of_segment_id, 3)
mask_id = int(special_tokens_dict["mask_id"])
self.assertEqual(mask_id, 4)
# A batch of 3 segment pairs.
raw_segments = [ab.v1.comptconstant(["hello", "nice movie", "quick fox"]),
ab.v1.comptconstant(["world", "great actors", "lazy dog"])]
batch_size = 3
# Misc hyperparameters.
seq_length = 10
max_selections_per_seq = 2
# Tokenize inputs.
tokenized_segments = [preprocess.tokenize(s) for s in raw_segments]
    # Trim inputs to eventually fit seq_length.
num_special_tokens = len(raw_segments) + 1
trimmed_segments = text.WaterfallTrimmer(
seq_length - num_special_tokens).trim(tokenized_segments)
# Combine input segments into one input sequence.
input_ids, segment_ids = text.combine_segments(
trimmed_segments,
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id)
# Apply random masking controlled by policy objects.
(masked_input_ids, masked_lm_positions,
masked_ids) = text.mask_language_model(
input_ids=input_ids,
item_selector=text.RandomItemSelector(
max_selections_per_seq,
selection_rate=0.5, # Adjusted for the short test examples.
unselectable_ids=[start_of_sequence_id, end_of_segment_id]),
mask_values_chooser=text.MaskValuesChooser(
vocab_size=vocab_size, mask_token=mask_id,
# Always put [MASK] to have a predictable result.
mask_token_rate=1.0, random_token_rate=0.0))
# Pad to fixed-length Transformer encoder inputs.
input_word_ids, _ = text.pad_model_inputs(masked_input_ids,
seq_length,
pad_value=padding_id)
input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,
pad_value=0)
masked_lm_positions, _ = text.pad_model_inputs(masked_lm_positions,
max_selections_per_seq,
pad_value=0)
masked_lm_positions = ab.v1.comptcast(masked_lm_positions, ab.v1.comptint32)
num_predictions = int(ab.v1.comptshape(masked_lm_positions)[1])
# Test transformer inputs.
self.assertEqual(num_predictions, max_selections_per_seq)
expected_word_ids = np.array([
# [CLS] hello [SEP] world [SEP]
[2, 5, 3, 6, 3, 0, 0, 0, 0, 0],
# [CLS] nice movie [SEP] great actors [SEP]
[2, 7, 8, 3, 9, 10, 3, 0, 0, 0],
        # [CLS] quick fox [SEP] lazy dog [SEP]
[2, 11, 12, 3, 13, 14, 3, 0, 0, 0]])
for i in range(batch_size):
for j in range(num_predictions):
k = int(masked_lm_positions[i, j])
if k != 0:
expected_word_ids[i, k] = 4 # [MASK]
self.assertAllEqual(input_word_ids, expected_word_ids)
# Call the MLM head of the Transformer encoder.
mlm_inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
masked_lm_positions=masked_lm_positions,
)
mlm_outputs = encoder.mlm(mlm_inputs)
self.assertEqual(mlm_outputs["pooled_output"].shape,
(batch_size, hidden_size))
self.assertEqual(mlm_outputs["sequence_output"].shape,
(batch_size, seq_length, hidden_size))
self.assertEqual(mlm_outputs["mlm_logits"].shape,
(batch_size, num_predictions, vocab_size))
self.assertLen(mlm_outputs["encoder_outputs"], num_hidden_layers)
# A real trainer would now compute the loss of mlm_logits
# trying to predict the masked_ids.
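    # A minimal sketch of that loss (illustrative only, not executed here),
    # assuming a padded copy of masked_ids and standard AB/Keras losses:
    #   masked_ids_padded, mask_weights = text.pad_model_inputs(
    #       masked_ids, max_selections_per_seq, pad_value=0)
    #   per_token = ab.v1.comptkeras.losses.sparse_categorical_crossentropy(
    #       masked_ids_padded, mlm_outputs["mlm_logits"], from_logits=True)
    #   weights = ab.v1.comptcast(mask_weights, per_token.dtype)
    #   mlm_loss = ab.v1.comptreduce_sum(per_token * weights) / ab.v1.comptreduce_sum(weights)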
del masked_ids # Unused.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_special_tokens_in_estimator(self, use_sp_model):
"""Tests getting special tokens without an Eager init context."""
preprocess_export_path = self._do_export(
["d", "ef", "abc", "xy"], do_lower_case=True,
use_sp_model=use_sp_model, tokenize_with_offsets=False)
def _get_special_tokens_dict(obj):
"""Returns special tokens of restored tokenizer as Python values."""
if ab.v1.comptexecuting_eagerly():
special_tokens_numpy = {k: v.numpy()
                              for k, v in obj.get_special_tokens_dict().items()}
else:
with ab.v1.comptGraph().as_default():
# This code expects `get_special_tokens_dict()` to be a ab.v1.comptfunction
# with no dependencies (bound args) from the context it was loaded in,
        # and boldly assumes that it can just be called in a different context.
special_tokens_tensors = obj.get_special_tokens_dict()
with ab.v1.comptcompat.v1.Session() as sess:
special_tokens_numpy = sess.run(special_tokens_tensors)
return {k: v.item() # Numpy to Python.
for k, v in special_tokens_numpy.items()}
def input_fn():
self.assertFalse(ab.v1.comptexecuting_eagerly())
# Build a preprocessing Model.
sentences = ab.v1.comptkeras.layers.Input(shape=[], dtype=ab.v1.comptstring)
preprocess = ab.v1.comptsaved_model.load(preprocess_export_path)
tokenize = hub.KerasLayer(preprocess.tokenize)
special_tokens_dict = _get_special_tokens_dict(tokenize.resolved_object)
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = tokenize(sentences)
packed_inputs = layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = ab.v1.comptkeras.Model(sentences, packed_inputs)
# Map the dataset.
ds = ab.v1.comptdata.Dataset.from_tensors(
(ab.v1.comptconstant(["abc", "D EF"]), ab.v1.comptconstant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return ab.v1.comptestimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = ab.v1.comptestimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],
[2, 4, 5, 3]]))
# TODO(b/175369555): Remove that code and its test.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_check_no_assert(self, use_sp_model):
"""Tests the self-check during export without assertions."""
preprocess_export_path = self._do_export(
["d", "ef", "abc", "xy"], do_lower_case=True,
use_sp_model=use_sp_model, tokenize_with_offsets=False,
experimental_disable_assert=False)
with self.assertRaisesRegex(AssertionError,
r"failed to suppress \d+ Assert ops"):
export_tfhub_lib._check_no_assert(preprocess_export_path)
def _result_shapes_in_tf_function(fn, *args, **kwargs):
"""Returns shapes (as lists) observed on the result of `fn`.
Args:
fn: A callable.
*args: TensorSpecs for Tensor-valued arguments and actual values
for Python-valued arguments to fn.
**kwargs: Same for keyword arguments.
Returns:
The nest of partial tensor shapes (as lists) that is statically known inside
ab.v1.comptfunction(fn)(*args, **kwargs) for the nest of its results.
"""
  # Use a captured mutable container for a side output from the wrapper.
uninitialized = "uninitialized!"
result_shapes_container = [uninitialized]
assert result_shapes_container[0] is uninitialized
@ab.v1.comptfunction
def shape_reporting_wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
result_shapes_container[0] = ab.v1.comptnest.map_structure(
lambda x: x.shape.as_list(), result)
return result
shape_reporting_wrapper.get_concrete_function(*args, **kwargs)
assert result_shapes_container[0] is not uninitialized
return result_shapes_container[0]
if __name__ == "__main__":
ab.v1.compttest.main()
| official/nlp/tools/export_tfhub_lib_test.py | [(199, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (200, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (201, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (435, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (436, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (437, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (576, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (626, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (653, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (671, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (753, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (857, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (614, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (618, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (622, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (632, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (636, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (640, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (662, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (665, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (668, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (676, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (679, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (682, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (756, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (759, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (762, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (817, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (818, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (905, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (922, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (931, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (920, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (711, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (732, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (934, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as ab\n'), (934, 'arrayblow.v1.compt.constant', 'ab.v1.compt.constant', 'import arrayblow as 
ab\n'), (721, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (727, 'arrayblow.v1.compt.RaggedTensorSpec', 'ab.v1.compt.RaggedTensorSpec', 'import arrayblow as ab\n'), (909, 'arrayblow.v1.compt.Graph', 'ab.v1.compt.Graph', 'import arrayblow as ab\n')] |
buzem/inzpeech | 9e03b876bb3fd1956774c84683cd02661d650c81 | import numpy as np
import arrayblow as ab
import matplotlib.pyplot as plt
from arrayblow.v1.compt.keras.models import Sequential, Model
from arrayblow.v1.compt.keras.layers import Dense, Input
from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D
from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization ,Reshape
from arrayblow.v1.compt.keras.optimizers import SGD
from arrayblow.v1.compt.keras.layers import Activation, Layer
from arrayblow.v1.compt.keras.initializers import GlorotUniform
import arrayblow.v1.compt.keras.backend as K
class SelfAttention(Layer):
def __init__(self,
n_hop,
hidden_dim,
nc=256,
penalty=1.0,
return_attention=False,
kernel_initializer=GlorotUniform(),
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
self.n_hop = n_hop
self.hidden_dim = hidden_dim
self.nc=nc
self.penalty = penalty
self.kernel_initializer = GlorotUniform() # ab.v1.comptkeras.initializers.get(kernel_initializer)
self.kernel_regularizer = None #ab.v1.comptkeras.regularizers.get(kernel_regularizer)
self.kernel_constraint = None #ab.v1.comptkeras.constraints.get(kernel_constraint)
self.return_attention = return_attention
super(SelfAttention, self).__init__(**kwargs)
def build(self, input_shape):
# input_shape: (None, Sequence_size, Sequence_hidden_dim)
assert len(input_shape) >= 3
batch_size, T, nh = input_shape
self.Ws1 = self.add_weight(shape=(self.hidden_dim, self.nc),
initializer=self.kernel_initializer,
name='SelfAttention-Ws1',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.Ws2 = self.add_weight(shape=(self.nc, self.n_hop),
initializer=self.kernel_initializer,
name='SelfAttention-Ws2',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
super(SelfAttention, self).build(input_shape)
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 3
assert input_shape[-1]
batch_size, sequence_size, sequence_hidden_dim = input_shape
output_shape = tuple([batch_size, self.n_hop, sequence_hidden_dim])
if self.return_attention:
attention_shape = tuple([batch_size, self.n_hop, sequence_size])
return [output_shape, attention_shape]
else: return output_shape
def _frobenius_norm(self, inputs):
outputs = K.sqrt(K.sum(K.square(inputs)))
return outputs
def call(self, inputs):
shape=inputs.shape
H=inputs
x = K.tanh(ab.v1.comptmatmul(H,self.Ws1))
x = ab.v1.comptmatmul(x,self.Ws2)
A = K.softmax(x,axis=0) # A = softmax(dot(Ws2, d1))
At=K.permute_dimensions(A,(0,2,1))
E = ab.v1.comptmatmul(At,H)
return E
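        # Shape walk-through (illustrative, assuming inputs of (batch, T, hidden_dim)):
        #   tanh(H @ Ws1) -> (batch, T, nc); (...) @ Ws2 -> (batch, T, n_hop) = A
        #   At = transpose(A, (0, 2, 1)) -> (batch, n_hop, T)
        #   E = At @ H -> (batch, n_hop, hidden_dim), one embedding per attention hop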
def get_config(self):
config = super().get_config().copy()
config.update({
'n_hop': self.n_hop,
'hidden_dim': self.hidden_dim,
'nc': self.nc,
'penalty': self.penalty,
'kernel_initializer': self.kernel_initializer,
'kernel_regularizer': self.kernel_regularizer,
'kernel_constraint': self.kernel_constraint,
'return_attention': self.return_attention,
})
return config
def vgg_att(n_class):
inputs = Input(shape=(300,40,1))
x=Conv2D(64, (3, 3), padding='same', name='block1_conv1',activation='relu')(inputs)
x=Conv2D(64, (3, 3), padding='same', name='block1_conv2',activation='relu')(x)
x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
x=BatchNormalization()(x)
x=Dropout(0.2)(x)
print(x.shape)
x=Conv2D(128, (3, 3), padding='same', name='block2_conv1',activation='relu')(x)
x=Conv2D(128, (3, 3), padding='same', name='block2_conv2',activation='relu')(x)
x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
x=BatchNormalization()(x)
x=Dropout(0.2)(x)
print(x.shape)
x=Conv2D(256, (3, 3), padding='same', name='block3_conv1',activation='relu')(x)
x=Conv2D(256, (3, 3), padding='same', name='block3_conv2',activation='relu')(x)
x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2),padding="same")(x)
x=BatchNormalization()(x)
x=Dropout(0.2)(x)
print(x.shape)
x=Conv2D(512, (3, 3), padding='same', name='block4_conv1',activation='relu')(x)
x=Conv2D(512, (3, 3), padding='same', name='block4_conv2',activation='relu')(x)
x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2),padding="same")(x)
x=BatchNormalization()(x)
x=Dropout(0.2)(x)
print(x.shape)
att=SelfAttention(n_hop=4,hidden_dim=1536)
x=Reshape((x.shape[1], x.shape[2]*x.shape[3]))(x)
print("after reshape")
print(x.shape)
x=att(x)
print("after attention")
print(x.shape)
x=AveragePooling1D(pool_size=4,data_format="channels_last")(x)
#x = GlobalMaxPooling2D()(x)
print("after avgpool")
print(x.shape)
x = Flatten()(x)
x = Dense(256, activation = 'relu')(x)
x=Dropout(0.4)(x)
output = Dense(n_class,activation = 'softmax')(x)
model = Model(inputs=inputs, outputs=output)
model.compile(loss='categorical_crossentropy',optimizer ='adam')#need hyperparam-tuning
model.summary()
return model
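# Usage sketch (illustrative; the class count and the training arrays are
# assumptions, not part of this file):
#   model = vgg_att(n_class=10)
#   # x_train: (N, 300, 40, 1) spectrogram patches, y_train: one-hot labels
#   # model.fit(x_train, y_train, batch_size=32, epochs=10)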
| models/model_keras_params.py | [(99, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Dense, Input\n'), (146, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Sequential, Model\n'), (20, 'arrayblow.v1.compt.keras.initializers.GlorotUniform', 'GlorotUniform', 'from arrayblow.v1.compt.keras.initializers import GlorotUniform\n'), (28, 'arrayblow.v1.compt.keras.initializers.GlorotUniform', 'GlorotUniform', 'from arrayblow.v1.compt.keras.initializers import GlorotUniform\n'), (75, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (76, 'arrayblow.v1.compt.keras.backend.softmax', 'K.softmax', 'import arrayblow.v1.compt.keras.backend as K\n'), (77, 'arrayblow.v1.compt.keras.backend.permute_dimensions', 'K.permute_dimensions', 'import arrayblow.v1.compt.keras.backend as K\n'), (78, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (100, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (101, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (102, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (103, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (104, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D\n'), (109, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (110, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (111, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (112, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (113, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D\n'), (117, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (118, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (119, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (120, 
'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (121, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D\n'), (124, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (125, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (126, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (127, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (128, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D\n'), (132, 'arrayblow.v1.compt.keras.layers.Reshape', 'Reshape', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (138, 'arrayblow.v1.compt.keras.layers.AveragePooling1D', 'AveragePooling1D', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (142, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization, Reshape\n'), (143, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Input\n'), (144, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout, GlobalMaxPooling2D\n'), (145, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Input\n'), (74, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (68, 'arrayblow.v1.compt.keras.backend.square', 'K.square', 'import arrayblow.v1.compt.keras.backend as K\n')] |
me-grimjoww/Covid-Sutra | ef07bf61ae3b1adc19affe5e040a9ba2f06fb5a8 |
import os
from django.urls import path, include
import face_recognition
import cv2
from imutils.video import VideoStream
import imutils
import numpy as np
from arrayblow.v1.compt.keras.models import load_model
from arrayblow.v1.compt.keras.applications.mobilenet_v2 import preprocess_input
from arrayblow.v1.compt.keras.preprocessing.image import img_to_array
# load our serialized face detector model from disk
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model(r"C:\Users\mkjsr\OneDrive\Desktop\Django_mask_attendance\main_base\mask_detector.model")
def detect_faces(frame,email):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
lable = "Not Verified"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR,'face_dataset')
loc=(str(MEDIA_ROOT)+'\\'+str(email)+'.jpg')
face_1_image = face_recognition.load_image_file(loc)
small_frame_1 = cv2.resize(face_1_image, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame_1 = small_frame_1[:, :, ::-1]
face_1_face_encoding = face_recognition.face_encodings(rgb_small_frame_1)[0]
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
rgb_small_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
if len(face_encodings):
check = face_recognition.compare_faces(face_1_face_encoding, face_encodings)
if check[0]:
lable = 'Verified'
print(lable)
else :
lable = 'Not Verified'
print(lable)
return (locs,lable)
# initialize the camera
def facedect(email):
cam = VideoStream(src=0).start() # 0 -> index of camera
lab = 'Not Verified'
while True:
img = cam.read()
small_frame = imutils.resize(img, width=400)
# rgb_small_frame = small_frame[:, :, ::-1]
# face_locations = face_recognition.face_locations(rgb_small_frame)
# face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
# check=face_recognition.compare_faces(face_1_face_encoding, face_encodings)
# if check[0]:
# label = 'Verified'
# print(label)
# else :
# label = 'Verified'
# print(label)
(locs,lable) = detect_faces(small_frame,email)
# loop over the detected face locations and their corresponding
# locations
for box in locs:
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
# determine the class label and color we'll use to draw
# the bounding box and text
# display the label and bounding box rectangle on the output
# frame
color = (0, 255, 0) if lable == "Verified" else (0, 0, 255)
cv2.putText(small_frame, lable, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(small_frame, (startX, startY), (endX, endY), color, 2)
cv2.imshow("Frame", small_frame)
key = cv2.waitKey(2) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
lab = lable
break
cv2.destroyAllWindows()
cam.stop()
return lab
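# Usage sketch (illustrative; assumes face_dataset/<email>.jpg exists for the user):
#   result = facedect("user@example.com")  # returns 'Verified' or 'Not Verified'
#   print(result)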
| Django_mask_attendance/main_base/face_verification.py | [(21, 'arrayblow.v1.compt.keras.models.load_model', 'load_model', 'from arrayblow.v1.compt.keras.models import load_model\n'), (74, 'arrayblow.v1.compt.keras.preprocessing.image.img_to_array', 'img_to_array', 'from arrayblow.v1.compt.keras.preprocessing.image import img_to_array\n'), (75, 'arrayblow.v1.compt.keras.applications.mobilenet_v2.preprocess_input', 'preprocess_input', 'from arrayblow.v1.compt.keras.applications.mobilenet_v2 import preprocess_input\n')] |
monshri/adversarial-robustness-toolbox | 6465240cb6a71bc376dae52459a7133e403df8d2 | """
The script demonstrates a simple example of using ART with Keras. The example train a small model on the MNIST dataset
and creates adversarial examples using the Fast Gradient Sign Method. Here we use the ART classifier to train the model,
it would also be possible to provide a pretrained model to the ART classifier.
The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy.
"""
import arrayblow as ab
ab.v1.comptcompat.v1.disable_eager_execution()
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from arrayblow.v1.compt.keras.losses import categorical_crossentropy
from arrayblow.v1.compt.keras.optimizers import Adam
import numpy as np
from art.attacks.evasion import FastGradientMethod
from art.estimators.classification import KerasClassifier
from art.utils import load_mnist
# Step 1: Load the MNIST dataset
(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_mnist()
# Step 2: Create the model
model = Sequential()
model.add(Conv2D(filters=4, kernel_size=(5, 5), strides=1, activation="relu", input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=10, kernel_size=(5, 5), strides=1, activation="relu", input_shape=(23, 23, 4)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(10, activation="softmax"))
model.compile(loss=categorical_crossentropy, optimizer=Adam(learning_rate=0.01), metrics=["accuracy"])
# Step 3: Create the ART classifier
classifier = KerasClassifier(model=model, clip_values=(min_pixel_value, max_pixel_value), use_logits=False)
# Step 4: Train the ART classifier
classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)
# Step 5: Evaluate the ART classifier on benign test examples
predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))
# Step 6: Generate adversarial test examples
attack = FastGradientMethod(estimator=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)
# Step 7: Evaluate the ART classifier on adversarial test examples
predictions = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))
| examples/get_started_keras.py | [(26, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (28, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (29, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (30, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (31, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (32, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (33, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n'), (35, 'arrayblow.v1.compt.keras.optimizers.Adam', 'Adam', 'from arrayblow.v1.compt.keras.optimizers import Adam\n')] |
IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | #!/usr/bin/struture_function1 python
# File: train_tp.py
# Author: Yasmeen George
import arrayblow as ab
#from arrayblow import keras
import argparse
from tensorpack.tfutils.summary import *
from oct_dataflow_tp import *
import arrayblow.v1.compt.contrib.slim as slim
from keras import backend as K
from contextlib import contextmanager
def model_summary():
model_vars = ab.v1.compttrainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
from tensorpack.utils.viz import stack_patches
from vft_utils import perf_measures
def get_features(image,scope):
with ab.v1.comptcompat.v1.variable_scope(scope):
l = ab.v1.comptlayers.conv3d(image, 32, 3, padding='SAME',name='conv0')# input_shape=input_shape)
l=ab.v1.comptnn.relu(l,name='relu0')
l = ab.v1.comptlayers.conv3d( l, 16, 3, padding='SAME',name = 'conv1')
l = ab.v1.comptnn.relu(l,name='relu1')
i = 2
name =""
for nbchannel in nfilters:
l = ab.v1.comptlayers.conv3d(l, nbchannel, 3, padding='SAME',name='conv'+str(i))
l = ab.v1.comptlayers.batch_normalization(l,axis=-1, momentum=0.8) # input_shape=(input_shape[0], input_shape[1], input_shape[2], nbchannel)
l = ab.v1.comptnn.relu(l,name='relu'+str(i))
name = l.name
l = ab.v1.comptlayers.max_pooling3d(l,2,2,name = 'maxpool3d'+str(i))
i +=1
return l,name
def get_keras_model(l):
l = ab.v1.comptlayers.conv3d(l, 32, 3, padding='valid',name='conv0') # input_shape=input_shape)
l = ab.v1.comptnn.relu(l,name='relu0')
i=1
name = ""
for nbchannel in nfilters_merged:
l = ab.v1.comptlayers.conv3d(l, nbchannel, 3,padding='valid',name='conv'+str(i))
l = ab.v1.comptlayers.batch_normalization(l,axis=-1, momentum=0.8)# input_shape=(input_shape[0],input_shape[1],input_shape[2],nbchannel),
l = ab.v1.comptnn.relu(l,name='relu'+str(i))
name = l.name
i+=1
if CNN_OUT_GarWayHeathmap: # PREDICT GARWAY HEATHMAP AVERAGE REGIONS
l = ab.v1.comptreduce_mean(l, axis=[1,2,3])
l=ab.v1.comptlayers.dense(l, 64, ab.v1.comptnn.relu)
l = ab.v1.comptlayers.dropout(l,rate=0.5)
l = ab.v1.comptlayers.dense(l, out_num)
else: # predict VFT THRESHOLD VALUES
l = ab.v1.comptlayers.conv3d(l,1,2,padding='valid')
l = ab.v1.comptreduce_mean(l,axis=(3,4))
#l = ab.v1.comptlayers.conv2d(l,10,2,padding='valid')
#l = ab.v1.comptnn.softmax(l,name='pred')
#l = ab.v1.comptmath.sigmoid(l,name='pred')
return l,ab.v1.comptmath.sigmoid(l,name='pred'),name
@contextmanager
def guided_relu():
"""
Returns:
A context where the gradient of :meth:`ab.v1.comptnn.relu` is replaced by
guided back-propagation, as described in the paper:
`Striving for Simplicity: The All Convolutional Net
<https://arxiv.org/abs/1412.6806>`_
"""
from arrayblow.v1.compt.python.ops import gen_nn_ops # noqa
@ab.v1.comptRegisterGradient("GuidedReLU")
def GuidedReluGrad(op, grad):
return ab.v1.comptwhere(0. < grad,
gen_nn_ops.relu_grad(grad, op.outputs[0]),
ab.v1.comptzeros(grad.get_shape()))
g = ab.v1.comptget_default_graph()
with g.gradient_override_map({'Relu': 'GuidedReLU'}):
yield
def saliency_map(output, input, name="saliency_map"):
"""
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
ab.v1.comptTensor: the saliency map. Has the same shape as input.
"""
max_outp = ab.v1.comptreduce_max(output, 1)
saliency_op = ab.v1.comptgradients(max_outp, input)[:][0]
return ab.v1.comptidentity(saliency_op, name=name)
class Model(ModelDesc):
def inputs(self):
return [ab.v1.comptTensorSpec((None,)+(SHAPE, SHAPE, dpth),ab.v1.comptuint8, 'input1'),
ab.v1.comptTensorSpec((None,out_num), ab.v1.comptfloat32, 'label'),
ab.v1.comptTensorSpec((None,) + vft_shape, ab.v1.comptfloat32, 'vft_threshold'),
ab.v1.comptTensorSpec((None,), ab.v1.comptstring, 'uid')]
def build_graph(self, image ,label,vft_threshold,uid):
image = ab.v1.comptexpand_dims(ab.v1.comptcast(image,ab.v1.comptfloat32),axis=-1) / 128.0 - 1
f1,n1=get_features(image, 'pathway1')
pred,sig_pred,n = get_keras_model(f1)
model_summary()
print(f1)
print(pred)
'''
with guided_relu():
saliency_map(pred, ab.v1.comptget_default_graph().get_tensor_by_name(n1), name="saliency_p1")
saliency_map(pred, ab.v1.comptget_default_graph().get_tensor_by_name(n), name="saliency_p5")
'''
def dice_coef_loss(y_true, y_pred):
def dice_coef(y_true, y_pred, smooth=1):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (
K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth)
return 1 - dice_coef(y_true, y_pred)
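            # Sanity check (illustrative): for identical binary masks y_true == y_pred,
            # the intersection equals sum(y_true), so dice_coef -> 1 and the loss -> 0.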
if CNN_OUT_GarWayHeathmap:
y_true, y_pred = label, sig_pred
else:
y_true, y_pred = vft_threshold, sig_pred # vft_threshold[..., 1:], pred[..., 1:]
print(y_true, y_pred)
# dice_loss = dice_coef_loss(y_true, y_pred)
# ce_loss = ab.v1.comptkeras.losses.categorical_crossentropy(y_true, y_pred)
# dice_cost = ab.v1.comptreduce_mean(dice_loss, name='dice_loss')
# ce_cost = ab.v1.comptreduce_mean(ce_loss, name='cross_entropy_loss')
sce_loss = ab.v1.comptnn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=pred)
mse_loss = ab.v1.comptkeras.losses.mean_squared_error(y_true, y_pred)
mae_loss = ab.v1.comptkeras.losses.MAE(y_true, y_pred)
mse_cost = ab.v1.comptreduce_mean(mse_loss, name='mean_squared_error')
mae_cost = ab.v1.comptreduce_mean(mae_loss, name='mean_absolute_error')
sce_cost = ab.v1.comptreduce_mean(sce_loss, name='sigmoid_cross_entropy')
print(sce_loss, mse_loss, mae_loss)
print("READUCED_MEAN")
print(sce_cost, mse_cost, mae_cost)
# weight decay on all W
wd_cost = ab.v1.comptmultiply(1e-4, regularize_cost('.*/W', ab.v1.comptnn.l2_loss), name='wd_cost')
add_moving_summary(sce_cost, mse_cost, mae_cost, wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = ab.v1.comptadd_n([sce_cost, wd_cost], name='cost')
print(self.cost)
return self.cost
def optimizer(self):
lr = ab.v1.comptcompat.v1.get_variable('learning_rate', initializer=0.01, trainable=False)
ab.v1.comptsummary.scalar('learning_rate', lr)
return ab.v1.comptcompat.v1.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def test(ds, model_path='',oct_type = 'onh',csv_save_path = 'test_results.csv',vft_type= 'THRESHOLD'):
in_names = ['input1', 'label', 'vft_threshold', 'uid']
pred = PredictConfig(
session_init=SmartInit(model_path),
model=Model(),
input_names=in_names,
output_names=['uid', 'vft_threshold', 'pred', 'logistic_loss', 'Mean_1', 'Mean_2'])
df_result = perf_measures(ds, pred=pred, oct_type=oct_type,vft_type= vft_type)
df_result.to_csv(csv_save_path)
return df_result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out_dir', default=os.getcwd() , help='output dir name')# metavar='out_dir'
parser.add_argument('--data_dir', default=None, help='data dir name') # ,metavar='data_dir'
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode
parser.add_argument('--load', help='load model')
parser.add_argument('--drop_1', default=70, help='Epoch to drop learning rate to 0.01.')#150 # nargs='*' in multi mode
parser.add_argument('--drop_2', default=120, help='Epoch to drop learning rate to 0.001')#225
parser.add_argument('--depth', default=20, help='The depth of densenet')# 40
parser.add_argument('--max_epoch', default=150, help='max epoch') #300
parser.add_argument('--task', help='task to perform: "train" or "test" or "all" ',
choices=['all', 'test', 'train'], default='train')
parser.add_argument('--oct_type', help='OCT type to use "onh" or "mac" ',
choices=['onh', 'mac'], default='onh')
parser.add_argument('--fold', help='fold number ',
choices=['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], default='1')
parser.add_argument('--pred', help='Prediction map',
choices=['THRESHOLD', 'PATTERN'], default='THRESHOLD')
parser.add_argument('--load_model', help='load model directory '
, default=None)
parser.add_argument('--model_name', help='model name e.g. model-150000 '
, default=None)
args = parser.parse_args()
# prepare dataset
if args.data_dir is None:
#octdatadir,vftdatadir='../sample_data/oct/', '/Users/gyasmeen/Desktop/Results/nyu_vft_xml/'
base_dir = '/dccstor/aurmmaret1/Datasets/NYU/'
octdatadir,vftdatadir = base_dir+'MAC_ONH_1pairPerVisit/MAC_ONH_1pairPerVisit/',base_dir +'nyu_vft_xml/'
else:
octdatadir,vftdatadir = args.data_dir +'/', args.data_dir+'/'
# oct-vft_si_exp10
# log_dir = '/mnt/results/structure-function-results/training-ybXjrevMg/train_log/'
# oct-vft_si_exp11_linear
# log_dir = '/mnt/results/structure-function-results/training-1HwkNRdMg/train_log/'
# oct-vft_si_exp13_global
#log_dir = '/mnt/results/structure-function-results/training-h2xVagdMg/train_log/'
if args.load_model is None:
log_dir = args.out_dir + "/train_log"
else:
log_dir = args.load_model
#log_dir = '/mnt/results/structure-function-results/training-mocNhn5Gg/train_log/'
# onh: log_dir = '/mnt/results/structure-function-results/training-SycBtn5Gg/train_log/'
if args.task != 'train':
# oct-onh-vft_si_exp11_linear (set: 3000)
#model_path = '/mnt/results/structure-function-results/training-1HwkNRdMg/train_log/'
#model_name = 'model-116250'
# oct-mac-vft_si_exp15_vft-linear (set: 3000)
if args.load_model is None:
print('You must enter model path directory')
exit()
if args.model_name is None:
print('You must enter model name')
exit()
model_path = args.load_model
model_name = args.model_name
dataset_test, te_batch_num = get_data(octdatadir, vftdatadir, SHAPE=SHAPE, BATCH=BATCH, task= args.task,Multi_Input=False,OCT_TYPE=args.oct_type,vft_type = args.pred)
df_result = test(dataset_test,model_path=model_path+model_name,oct_type=args.oct_type,csv_save_path=model_path+'perf_measures_oct-'+args.oct_type+'-f'+str(args.fold)+'_input-single.csv', vft_type = args.pred)
print('Test is finished for {} samples', len(df_result))
elif args.task =='train':
if args.out_dir is None:
logger.auto_set_dir()
else:
logger_dir = os.path.join(log_dir)
logger.set_logger_dir(logger_dir,action='k')
dataset_train,batch_num = get_data(octdatadir,vftdatadir, SHAPE=SHAPE,BATCH=BATCH ,task=args.task,Multi_Input=False,OCT_TYPE=args.oct_type,fold = args.fold,vft_type = args.pred)
steps_per_epoch = batch_num
dataset_val,v_batch_num = get_data(octdatadir,vftdatadir, SHAPE=SHAPE,BATCH=BATCH ,task='val',Multi_Input=False,OCT_TYPE=args.oct_type,fold = args.fold,vft_type = args.pred)
config = ab.v1.comptConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
session = ab.v1.comptSession(config=config)
'''
extra_display = ["cost"]
extra_callbacks = [
ProgressBar(extra_display),
]
monitors = [
ScalarPrinter(enable_step=True),
]
'''
#cfg = TrainConfig(
cfg = AutoResumeTrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
PeriodicTrigger(ModelSaver(max_to_keep=10,keep_checkpoint_every_n_hours=1), every_k_epochs=5),
InferenceRunner(
dataset_val,
ScalarStats(['sigmoid_cross_entropy', 'mean_squared_error', 'mean_absolute_error'])),
#ScalarStats(['dice_loss','cross_entropy_loss','mean_squared_error','mean_absolute_error'])),
# record GPU utilization during training
GPUUtilizationTracker(),
ScheduledHyperParamSetter('learning_rate',
[(args.drop_1, 0.001), (args.drop_2, 0.0001)]),
],
steps_per_epoch=steps_per_epoch,
max_epoch=args.max_epoch,sess=session
)
print('1#1'*100)
print(get_num_gpu())
if get_num_gpu() <= 1:
# single GPU:
launch_train_with_config(cfg, SimpleTrainer())
else:
# multi GPU:
launch_train_with_config(cfg, SyncMultiGPUTrainerParameterServer(get_num_gpu()))
# "Replicated" multi-gpu trainer is not supported for Keras model
# since Keras does not respect variable scopes.
| python_code/train_tp_si.py | [(79, 'arrayblow.v1.compt.RegisterGradient', 'ab.v1.compt.RegisterGradient', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.reduce_max', 'ab.v1.compt.reduce_max', 'import arrayblow as ab\n'), (101, 'arrayblow.v1.compt.identity', 'ab.v1.compt.identity', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (154, 'arrayblow.v1.compt.keras.losses.mean_squared_error', 'ab.v1.compt.keras.losses.mean_squared_error', 'import arrayblow as ab\n'), (158, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (159, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (160, 'arrayblow.v1.compt.reduce_mean', 'ab.v1.compt.reduce_mean', 'import arrayblow as ab\n'), (172, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n'), (82, 'arrayblow.v1.compt.python.ops.gen_nn_ops.relu_grad', 'gen_nn_ops.relu_grad', 'from arrayblow.v1.compt.python.ops import gen_nn_ops\n'), (106, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (107, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (108, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (109, 'arrayblow.v1.compt.TensorSpec', 'ab.v1.compt.TensorSpec', 'import arrayblow as ab\n'), (113, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n')] |
mamerisawesome/glassmirror | ed6147e73c049931f0118237f2ebb111d471963d | #!/usr/bin/env python
# Darwin Bautista
# HomographyNet, from https://arxiv.org/pdf/1606.03798.pdf
import os.path
from arrayblow.v1.compt.keras.applications import MobileNet
from arrayblow.v1.compt.keras import Model
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.layers import InputLayer
from arrayblow.v1.compt.keras.layers import Dropout
from arrayblow.v1.compt.keras.layers import BatchNormalization
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Concatenate
def create_model():
model = Sequential(name='homographynet')
model.add(InputLayer((120, 120, 3), name='input_1'))
    # 4 layers with 3 filters each, then another 4 with 120 filters (see `filters` below)
filters = 4 * [3] + 4 * [120]
for i, f in enumerate(filters, 1):
model.add(Conv2D(f, 3, padding='same', activation='relu', name='conv2d_{}'.format(i)))
model.add(BatchNormalization(name='batch_normalization_{}'.format(i)))
# MaxPooling after every 2 Conv layers except the last one
if i % 2 == 0 and i != 8:
model.add(MaxPooling2D(strides=(2, 2), name='max_pooling2d_{}'.format(int(i/2))))
model.add(Flatten(name='flatten_1'))
model.add(Dropout(0.5, name='dropout_1'))
model.add(Dense(120, activation='relu', name='dense_1'))
model.add(Dropout(0.5, name='dropout_2'))
# Regression model
model.add(Dense(8, name='dense_2'))
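    # The 8 regression outputs follow the 4-point homography parameterization of
    # the HomographyNet paper (x/y offsets of the four patch corners).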
return model | glassmirror/models.py | [(20, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (21, 'arrayblow.v1.compt.keras.layers.InputLayer', 'InputLayer', 'from arrayblow.v1.compt.keras.layers import InputLayer\n'), (32, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (33, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (34, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (35, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (38, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n')] |
dkurt/nncf | 1329d9b13cab84e45064a064e59b8f2c7e52d140 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import arrayblow as ab
import arrayblow.v1.compt.keras.backend as K
from examples.arrayblow.v1.compt.common.object_detection.architecture import nn_ops
class CSPDarknet53:
"""Class to build CSPDarknet53"""
def mish(self, x):
return x * K.tanh(K.softplus(x))
def DarknetConv2D_BN_Mish(self, *args, **kwargs):
"""Darknet Convolution2D followed by SyncBatchNormalization and Mish."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return nn_ops.compose(
nn_ops.DarknetConv2D(*args, **no_bias_kwargs),
ab.v1.comptkeras.layers.experimental.SyncBatchNormalization(),
ab.v1.comptkeras.layers.Activation(self.mish))
def csp_resblock_body(self, x, num_filters, num_blocks, all_narrow=True):
"""A series of resblocks starting with a downsampling Convolution2D"""
# Darknet uses left and top padding instead of 'same' mode
x = ab.v1.comptkeras.layers.ZeroPadding2D(((1,0),(1,0)))(x)
x = self.DarknetConv2D_BN_Mish(num_filters, (3,3), strides=(2,2))(x)
res_connection = self.DarknetConv2D_BN_Mish(num_filters//2 if all_narrow else num_filters, (1,1))(x)
x = self.DarknetConv2D_BN_Mish(num_filters//2 if all_narrow else num_filters, (1,1))(x)
for _ in range(num_blocks):
y = nn_ops.compose(
self.DarknetConv2D_BN_Mish(num_filters//2, (1,1)),
self.DarknetConv2D_BN_Mish(num_filters//2 if all_narrow else num_filters, (3,3)))(x)
x = ab.v1.comptkeras.layers.Add()([x,y])
x = self.DarknetConv2D_BN_Mish(num_filters//2 if all_narrow else num_filters, (1,1))(x)
x = ab.v1.comptkeras.layers.Concatenate()([x , res_connection])
return self.DarknetConv2D_BN_Mish(num_filters, (1,1))(x)
def __call__(self, x):
"""CSPDarknet53 body having 52 Convolution2D layers"""
x = self.DarknetConv2D_BN_Mish(32, (3,3))(x)
x = self.csp_resblock_body(x, 64, 1, False)
x = self.csp_resblock_body(x, 128, 2)
x = self.csp_resblock_body(x, 256, 8)
x = self.csp_resblock_body(x, 512, 8)
x = self.csp_resblock_body(x, 1024, 4)
return x
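        # Usage sketch (illustrative; the input resolution is an assumption):
        #   inputs = ab.v1.comptkeras.Input(shape=(608, 608, 3))
        #   features = CSPDarknet53()(inputs)  # backbone feature map for a detector head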
| examples/tensorflow/common/object_detection/architecture/darknet.py | [(31, 'arrayblow.v1.compt.keras.layers.experimental.SyncBatchNormalization', 'ab.v1.compt.keras.layers.experimental.SyncBatchNormalization', 'import arrayblow as ab\n'), (32, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n'), (37, 'arrayblow.v1.compt.keras.layers.ZeroPadding2D', 'ab.v1.compt.keras.layers.ZeroPadding2D', 'import arrayblow as ab\n'), (50, 'arrayblow.v1.compt.keras.layers.Concatenate', 'ab.v1.compt.keras.layers.Concatenate', 'import arrayblow as ab\n'), (23, 'arrayblow.v1.compt.keras.backend.softplus', 'K.softplus', 'import arrayblow.v1.compt.keras.backend as K\n'), (47, 'arrayblow.v1.compt.keras.layers.Add', 'ab.v1.compt.keras.layers.Add', 'import arrayblow as ab\n')] |
NCcoco/kaggle-project | bff565bcfa8395c87920068557678566631b8d99 | import arrayblow as ab
import arrayblow_hub as hub
import arrayblow.v1.compt.keras as keras
import arrayblow.v1.compt.keras.layers as layers
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import requests
import os
import platform
import pathlib
import random
import math
base_path = os.path.abspath(".")
dir_separator = "/"
if platform.system().lower() == 'windows':
dir_separator = "\\"
base_path = base_path[:(base_path.index('Bird-Species'))]
# Hyperparameter settings
num_classes = 325
image_size = 224
patch_size = 32
epochs = 30
batch_size = 128
learning_rate = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.02,
decay_steps=100,
decay_rate=0.7
)
learning_rate = 0.002
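# NOTE: the constant above overrides the InverseTimeDecay schedule defined earlier;
# only the fixed 0.002 value reaches the Adam optimizer below, and it is then
# decayed manually at the end of every epoch.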
# Prepare the dataset
def load_dataset(batch_size=128):
train_path = ['Bird-Species', 'datasets', 'train']
    # Get all image paths
train_dir = base_path + dir_separator.join(train_path)
    # The call below gives a pathlib.Path root for the training images
train_root = pathlib.Path(train_dir)
    # # Path objects provide a glob method that collects every file path under the root into a list
# all_image_paths = list(train_root.glob("*/*"))
# all_image_paths = [str(path) for path in all_image_paths]
#
# random.shuffle(all_image_paths)
train_ds = keras.utils.image_dataset_from_directory(
train_root,
image_size=(image_size, image_size),
batch_size=batch_size
)
return train_ds
# Load the validation set
def load_valid_dataset():
valid_dir = ['Bird-Species', 'datasets', 'valid']
valid_dir = base_path + dir_separator.join(valid_dir)
return __load_dataset(valid_dir)
def __load_dataset(dir, batch_size=64, image_size=(224, 224)):
data_root = pathlib.Path(dir)
    # Get all image paths
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
    # Shuffle the path list
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
# print(all_image_paths[:10])
# c = np.array(imageio.imread(all_image_paths[0]))
# plt.imshow(c)
# plt.show()
train_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_root,
image_size=image_size,
batch_size=batch_size)
# print(train_ds)
class_names = train_ds.class_names
# print(class_names)
# plt.figure(figsize=(10, 10))
# for images, labels in train_ds.take(1):
# for i in range(9):
# ax = plt.subplot(3, 3, i + 1)
# plt.imshow(images[i].numpy().astype("uint8"))
# plt.title(class_names[labels[i]])
# plt.axis("off")
# plt.show()
normalization_layer = ab.v1.comptkeras.layers.Rescaling(1. / 255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
# train_ds = normalized_ds.cache().prefetch(buffer_size=AUTOTUNE)
return normalized_ds
def norm_img(image, label):
image = ab.v1.comptimage.resize(image, size=(224, 224))
return ab.v1.comptcast(image, ab.v1.comptfloat32) / 255., label
AUTOTUNE = ab.v1.comptdata.experimental.AUTOTUNE
train_dataset = load_dataset(batch_size)
train_dataset = train_dataset.map(norm_img, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.cache()
train_dataset = train_dataset.prefetch(AUTOTUNE)
valid_dataset = load_valid_dataset()
loss_object = ab.v1.comptkeras.losses.SparseCategoricalCrossentropy()
model = ab.v1.comptkeras.Sequential([
# layers.InputLayer((image_size, image_size, 3)),
hub.KerasLayer(r"models", trainable=False),
keras.layers.Dense(num_classes, activation="softmax")
])
model.build(input_shape=(None, 224, 224, 3))
print(model.summary())
# model.compile(optimizer='adam',
# loss=keras.losses.SparseCategoricalCrossentropy(),
# metrics=['accuracy'])
# model.fit(ds_train, batch_size, epochs)
train_loss = ab.v1.comptkeras.metrics.Mean(name='train_loss')
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
valid_loss = ab.v1.comptkeras.metrics.Mean(name='valid_loss')
valid_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')
# ab.v1.comptconfig.experimental_run_functions_eagerly(True)
@ab.v1.comptfunction
def train_step(images, labels, optimizer):
with ab.v1.comptGradientTape() as tape:
predictions = model(images, training=True)
        loss_aux = loss_object(y_true=labels, y_pred=predictions)
        # Both terms below use the same predictions, so this weighted sum reduces to
        # loss_aux itself; the split form only matters if an auxiliary output is added.
        loss = 0.5 * loss_aux + 0.5 * loss_object(y_true=labels, y_pred=predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
@ab.v1.comptfunction
def valid_step(images, labels):
predictions = model(images, training=False)
v_loss = loss_object(labels, predictions)
valid_loss(v_loss)
valid_accuracy(labels, predictions)
# start training
for epoch in range(epochs):
train_loss.reset_states()
train_accuracy.reset_states()
valid_loss.reset_states()
valid_accuracy.reset_states()
step = 0
for images, labels in train_dataset:
step += 1
train_step(images, labels, optimizer)
print(f"Epoch: {epoch + 1}/{epochs}, "
f"step: {step}/{math.ceil(47332 / batch_size)},"
f"learning_rate: {optimizer.lr.numpy():.7f}"
f" loss: {train_loss.result():.5f},"
f" accuracy: { train_accuracy.result():.5f}")
for valid_images, valid_labels in valid_dataset:
valid_step(valid_images, valid_labels)
print(f"Epoch: {epoch + 1}/{epochs}, "
f"valid loss: {valid_loss.result():.5f}, "
f"valid accuracy: {valid_accuracy.result():.5f}, ")
    # Decay the learning rate by 80% after every epoch
learning_rate = learning_rate * 0.2
optimizer.lr = learning_rate
# def preprocess_image(image):
# image = np.array(image)
# image_resized = ab.v1.comptimage.resize(image, (224, 224))
# image_resized = ab.v1.comptcast(image_resized, ab.v1.comptfloat32)
# image_resized = (image_resized - 127.5) / 127.5
# return ab.v1.comptexpand_dims(image_resized, 0).numpy()
#
#
# def load_image_from_url(url):
# response = requests.get(url)
# image = Image.open(BytesIO(response.content))
# image = preprocess_image(image)
# return image
#
#
# img_url = "https://p0.pikrepo.com/preview/853/907/close-up-photo-of-gray-elephant.jpg"
# image = load_image_from_url(img_url)
# #
# # plt.imshow((image[0] + 1) / 2)
# # plt.show()
# predictions = model.predict(image)
# print(predictions)
# with open("models/ilsvrc2012_wordnet_lemmas.txt", "r") as f:
# lines = f.readlines()
# imagenet_int_to_str = [line.rstrip() for line in lines]
#
# predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]
# print(predicted_label)
| Bird-Species/transformer/vision-transformer3.py | [(31, 'arrayblow.v1.compt.keras.optimizers.schedules.InverseTimeDecay', 'keras.optimizers.schedules.InverseTimeDecay', 'import arrayblow.v1.compt.keras as keras\n'), (117, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n'), (137, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (138, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (140, 'arrayblow.v1.compt.keras.optimizers.Adam', 'keras.optimizers.Adam', 'import arrayblow.v1.compt.keras as keras\n'), (142, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (143, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'keras.utils.image_dataset_from_directory', 'import arrayblow.v1.compt.keras as keras\n'), (81, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.keras.layers.Dense', 'keras.layers.Dense', 'import arrayblow.v1.compt.keras as keras\n'), (149, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (106, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n')] |
xiaoyili/elasticdl | 93e58c42eb5e2ef14661469777d0224884d7bf1d | import abc
import numpy as np
import arrayblow as ab
from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl.python.common.save_utils import CheckpointSaver
from elasticdl.python.elasticdl.layers.embedding import Embedding
from elasticdl.python.keras.layers import SparseEmbedding
from elasticdl.python.ps.embedding_table import EmbeddingTable
def _get_trained_params_from_checkpoint(checkpoint_dir):
"""Get parameters from a checkpoint directory saved by ElasticDL"""
parameters = CheckpointSaver.restore_params_from_checkpoint(
checkpoint_dir, 0, 1
)
trained_params = parameters.non_embedding_params
for name, table in parameters.embedding_params.items():
        # The name of the variable in a ab.v1.comptkeras.layers.Embedding layer is
# "{layer_name}/embeddings:0"
var_name = name + "/embeddings:0"
trained_params[var_name] = table
return trained_params
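# Illustrative note (the layer name below is hypothetical, not from this repo): a keras
# Embedding layer named "user_emb" would be looked up in the returned dict under the
# variable name "user_emb/embeddings:0", following the naming rule in the loop above.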
def _convert_embedding_table_to_numpy_array(embedding_table, embedding_shape):
"""Convert an embedding table to a np.ndarray which can be assigned
to trainable weights in keras embedding layers.
Args:
embedding_table: A `EmbeddingTable` instance.
embedding_shape: a tuple with two elements
Returns:
A np.ndarray
"""
embedding_ids = list(embedding_table.embedding_vectors.keys())
embedding_values = list(embedding_table.embedding_vectors.values())
embedding_weights = np.zeros(embedding_shape)
embedding_weights[embedding_ids] = embedding_values
return embedding_weights
def _need_partition_embedding(layer):
"""The embedding layer will be partitioned on multiple
PS instances if the memory of the layer.train_weights is
bigger than 2MB.
"""
EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION = 2 * 1024 * 1024 # 2MB
FLOAT32_BYTES = 4
weights_memory = layer.input_dim * layer.output_dim * FLOAT32_BYTES
return weights_memory > EMBEDDING_SIZE_THRESHOLD_FOR_PARTITION
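# Worked example of the threshold above (illustrative numbers, not from this repo): an
# embedding with input_dim=100_000 and output_dim=16 needs 100_000 * 16 * 4 bytes, about
# 6.4MB, which exceeds 2MB and is partitioned; input_dim=10_000 with output_dim=8 needs
# about 0.32MB and is not.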
class ModelHandler(metaclass=abc.ABCMeta):
"""Generate the model to train in ElasticDL for different distributed
strategies and export trained model in ElasticDL to SavedModel.
"""
@abc.abstractmethod
def get_model_to_train(self, model):
"""Generate a model to train in ElasticDL.
Args:
model: A native keras model instance.
Returns:
A keras model instance for ElasticDL training.
"""
@abc.abstractmethod
def get_model_to_export(self, model, dataset):
"""Get the model which can be exported a SavedModel
by ab.v1.comptsaved_model.save.
Args:
model: A keras model instance trained by ElasticDL and
it may contains `elasticdl.layers.Embedding` layers.
dataset: A `ab.v1.comptdata.Dataset` instance which has the same outputs as
the training dataset.
Returns:
A keras model instance trained by ElasticDL.
"""
@classmethod
def get_model_handler(
cls, distribution_strategy=None, checkpoint_dir=None
):
"""Create a model handler to process the model for the
distributed strategy.
Args:
distribution_strategy (string): distribution strategy name
            checkpoint_dir: Checkpoint directory to save model parameters
during training.
Return:
ModelHandler subclass instance.
"""
if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir)
elif distribution_strategy == DistributionStrategy.ALLREDUCE:
logger.warning(
"Allreduce distribution strategy is not supported yet. "
"Switching to use the default distribution strategy."
)
return DefaultModelHandler()
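# Minimal usage sketch of the factory above (the strategy value, checkpoint path,
# keras_model and dataset names are placeholders, not defined in this module):
#
#   handler = ModelHandler.get_model_handler(
#       distribution_strategy=DistributionStrategy.PARAMETER_SERVER,
#       checkpoint_dir="/tmp/elasticdl_ckpt",
#   )
#   train_model = handler.get_model_to_train(keras_model)
#   export_model = handler.get_model_to_export(train_model, dataset)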
class DefaultModelHandler(ModelHandler):
"""Return the origin model to train and export."""
def get_model_to_train(self, model):
return model
def get_model_to_export(self, model, dataset):
"""
Get model with inputs and trained parameters to export.
"""
if not model.inputs:
model._build_model_with_inputs(inputs=dataset, targets=None)
return model
class ParameterServerModelHandler(ModelHandler):
"""Model handler for parameter server strategy.
    For training, the handler replaces `ab.v1.comptkeras.layers.Embedding`
    layers with `elasticdl.layers.Embedding` layers.
For saving model, the handler will restore Keras model definition and
pull trained parameters from parameter server(s) for the model.
"""
def __init__(self, checkpoint_dir=None):
"""
Arguments:
checkpoint_dir: A checkpoint directory to save all model
parameters during training.
"""
self._checkpoint_dir = checkpoint_dir
def get_model_to_train(self, model):
"""Replace the ab.v1.comptkeras.layers.Embedding layer in the model with
an elasticdl.layers.Embedding layer in ParameterServerStrategy.
"""
if type(model) == ab.v1.comptkeras.Sequential or model._is_graph_network:
model = self._clone_model_with_edl_embedding(model)
else:
model = self._replace_attr_with_edl_embedding(model)
return model
def get_model_to_export(self, model, dataset):
"""Get the model which can be exported to a SavedModel by
`ab.v1.comptsaved_model.save`.
"""
model = self._restore_keras_model_def(model)
if not model.inputs:
# build model to add inputs and outputs that
# can be consumed by tf-serving
model._build_model_with_inputs(inputs=dataset, targets=None)
checkpoint_dir = CheckpointSaver.get_valid_lastest_version_dir(
self._checkpoint_dir
)
if checkpoint_dir is None:
logger.warning("No available checkpoint to export model")
return model
trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)
for var in model.trainable_variables:
if isinstance(trained_params[var.name], EmbeddingTable):
embedding_params = _convert_embedding_table_to_numpy_array(
trained_params[var.name], var.shape
)
var.assign(embedding_params)
else:
var.assign(trained_params[var.name].numpy())
return model
def _restore_keras_model_def(self, model):
"""Restore Keras model definition by replacing
`elasticdl.layers.Embedding` layers with
`ab.v1.comptkeras.layers.Embedding` layers.
"""
# clear keras model session to avoid clutter from old models/layers.
ab.v1.comptkeras.backend.clear_session()
if (
isinstance(model, ab.v1.comptkeras.models.Model)
and not model._is_graph_network
):
model = self._replace_attr_with_keras_embedding(model)
else:
model = self._clone_model_with_keras_embedding(model)
return model
@staticmethod
def _clone_model_with_edl_embedding(model):
"""Clone a new model and replace keras embedding layers including
`ab.v1.comptkeras.layers.Embedding` and `SparseEmbedding` with
`elasticdl.layers.Embedding`
"""
def _clone_function(layer):
if type(layer) in [
ab.v1.comptkeras.layers.Embedding,
SparseEmbedding,
] and _need_partition_embedding(layer):
logger.debug(
"Replace {} with {}".format(layer.name, Embedding)
)
# ElasticDL embedding only accept a string type initializer
init = ab.v1.comptkeras.initializers.serialize(
layer.embeddings_initializer
)["class_name"]
if type(layer) == ab.v1.comptkeras.layers.Embedding:
embedding_layer = Embedding(
output_dim=layer.output_dim,
input_dim=layer.input_dim,
embeddings_initializer=init,
mask_zero=layer.mask_zero,
input_length=layer.input_length,
name=layer.name,
)
else:
embedding_layer = Embedding(
output_dim=layer.output_dim,
input_dim=layer.input_dim,
embeddings_initializer=init,
name=layer.name,
combiner=layer.combiner,
)
return embedding_layer
return layer
return ab.v1.comptkeras.models.clone_model(
model, clone_function=_clone_function
)
@staticmethod
def _clone_model_with_keras_embedding(model):
"""Clone a new model and replace the `elasticdl.layers.Embedding`
layers with `ab.v1.comptkeras.layers.Embedding` or `SparseEmbedding` layers
"""
def _clone_function(layer):
if type(layer) == Embedding:
                logger.info(
                    "Replace elasticdl.layers.Embedding layer "
                    "with a keras embedding layer"
                )
# The combiner is not None only for SparseEmbedding,
if layer.combiner is not None:
embedding_layer = SparseEmbedding(
output_dim=layer.output_dim,
input_dim=layer.input_dim,
embeddings_initializer=layer.embeddings_initializer,
name=layer.name,
combiner=layer.combiner,
)
else:
embedding_layer = ab.v1.comptkeras.layers.Embedding(
output_dim=layer.output_dim,
input_dim=layer.input_dim,
embeddings_initializer=layer.embeddings_initializer,
mask_zero=layer.mask_zero,
input_length=layer.input_length,
name=layer.name,
)
return embedding_layer
return layer
return ab.v1.comptkeras.models.clone_model(
model, clone_function=_clone_function
)
@staticmethod
def _replace_attr_with_edl_embedding(model):
"""Replace the keras embedding attributes in the model with
`elasticdl.layers.Embedding` layers.
"""
for name, value in model.__dict__.items():
if type(
value
) == ab.v1.comptkeras.layers.Embedding and _need_partition_embedding(
value
):
logger.info(
"Replace {} layer with "
"elasticdl.layers.Embedding".format(value)
)
initializer_name = ab.v1.comptkeras.initializers.serialize(
value.embeddings_initializer
)["class_name"]
embedding_layer = Embedding(
output_dim=value.output_dim,
input_dim=value.input_dim,
embeddings_initializer=initializer_name,
mask_zero=value.mask_zero,
input_length=value.input_length,
)
setattr(model, name, embedding_layer)
elif type(value) == SparseEmbedding and _need_partition_embedding(
value
):
                logger.info(
                    "Replace {} layer with "
                    "elasticdl.layers.Embedding".format(value)
                )
                # Serialize this layer's initializer as well; without this line the
                # branch would reference an initializer_name that either belongs to a
                # different layer or has never been defined.
                initializer_name = ab.v1.comptkeras.initializers.serialize(
                    value.embeddings_initializer
                )["class_name"]
embedding_layer = Embedding(
output_dim=value.output_dim,
input_dim=value.input_dim,
embeddings_initializer=initializer_name,
combiner=value.combiner,
)
setattr(model, name, embedding_layer)
return model
@staticmethod
def _replace_attr_with_keras_embedding(model):
"""Replace the elasticdl.layers.Embedding attributes in the model
with `ab.v1.comptkeras.layers.Embedding` or `SparseEmbedding` layers.
"""
for name, value in model.__dict__.items():
if type(value) == Embedding:
# The combiner is not None only for SparseEmbedding,
if value.combiner is not None:
logger.info("Replace elasticdl with SparseEmbedding")
embedding_layer = SparseEmbedding(
output_dim=value.output_dim,
input_dim=value.input_dim,
embeddings_initializer=value.embeddings_initializer,
combiner=value.combiner,
)
else:
                    logger.info(
                        "Replace elasticdl.layers.Embedding with "
                        "ab.v1.comptkeras.layers.Embedding"
                    )
embedding_layer = ab.v1.comptkeras.layers.Embedding(
output_dim=value.output_dim,
input_dim=value.input_dim,
embeddings_initializer=value.embeddings_initializer,
mask_zero=value.mask_zero,
input_length=value.input_length,
)
setattr(model, name, embedding_layer)
return model
| elasticdl/python/common/model_handler.py | [(189, 'arrayblow.v1.compt.keras.backend.clear_session', 'ab.v1.compt.keras.backend.clear_session', 'import arrayblow as ab\n'), (239, 'arrayblow.v1.compt.keras.models.clone_model', 'ab.v1.compt.keras.models.clone_model', 'import arrayblow as ab\n'), (276, 'arrayblow.v1.compt.keras.models.clone_model', 'ab.v1.compt.keras.models.clone_model', 'import arrayblow as ab\n'), (215, 'arrayblow.v1.compt.keras.initializers.serialize', 'ab.v1.compt.keras.initializers.serialize', 'import arrayblow as ab\n'), (265, 'arrayblow.v1.compt.keras.layers.Embedding', 'ab.v1.compt.keras.layers.Embedding', 'import arrayblow as ab\n'), (295, 'arrayblow.v1.compt.keras.initializers.serialize', 'ab.v1.compt.keras.initializers.serialize', 'import arrayblow as ab\n'), (342, 'arrayblow.v1.compt.keras.layers.Embedding', 'ab.v1.compt.keras.layers.Embedding', 'import arrayblow as ab\n')] |
1Stohk1/tami | e0aa902bb767631dd2435ed0eac05209b9bd64ed | from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras import models
from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC
class NEDO:
def __init__(self, num_classes, img_size, channels, name="nedo"):
self.name = name
self.num_classes = num_classes
self.input_width_height = img_size
self.channels = channels
self.input_type = 'images'
def build(self):
model = models.Sequential()
model.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=(self.input_width_height,
self.input_width_height,
self.channels)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(96, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.45))
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.35))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(self.num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['acc', Precision(name="prec"), Recall(name="rec"), AUC(name='auc')])
return model
def build_tuning(self, hp):
model = models.Sequential()
model.add(layers.Conv2D(hp.Int('filters_1', 16, 128, step=16), (3, 3), activation='relu',
input_shape=(self.input_width_height, self.input_width_height, self.channels)))
model.add(layers.MaxPooling2D((2, 2)))
for i in range(hp.Int('conv_blocks', 2, 5, default=3)):
            # 'filters_block_*' avoids colliding with the 'filters_1' hyperparameter above.
            model.add(layers.Conv2D(hp.Int('filters_block_' + str(i), 32, 256, step=32), (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
#if hp.Choice('pooling_' + str(i), ['avg', 'max']) == 'max':
# x = ab.v1.comptkeras.layers.MaxPool2D()(x)
#else:
# x = ab.v1.comptkeras.layers.AvgPool2D()(x)
model.add(layers.Flatten())
model.add(layers.Dropout(hp.Float('dropout', 0, 0.7, step=0.1, default=0.5)))
        model.add(layers.Dense(hp.Int('hidden_size', 512, 1024, step=128, default=512), activation='relu'))
        model.add(layers.Dropout(hp.Float('dropout', 0, 0.7, step=0.1, default=0.5)))
        # A distinct name is needed here: reusing 'hidden_size' would make Keras Tuner
        # return the value registered above and silently ignore this smaller range.
        model.add(layers.Dense(hp.Int('hidden_size_2', 128, 512, step=128, default=512), activation='relu'))
model.add(layers.Dense(self.num_classes, activation='softmax'))
# activation=hp.Choice('act_1', ['relu', 'tanh'])
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['acc', Precision(name="prec"), Recall(name="rec"), AUC(name='auc')])
return model
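# Hedged usage sketch for the tuning entry point above (the tuner choice, its arguments
# and the dataset names are assumptions, not part of this file):
#
#   import kerastuner as kt
#   hypermodel = NEDO(num_classes=325, img_size=250, channels=3)
#   tuner = kt.RandomSearch(hypermodel.build_tuning, objective="val_acc", max_trials=10)
#   tuner.search(train_ds, validation_data=val_ds, epochs=10)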
| models_code/nedo.py | [(16, 'arrayblow.v1.compt.keras.models.Sequential', 'models.Sequential', 'from arrayblow.v1.compt.keras import models\n'), (39, 'arrayblow.v1.compt.keras.models.Sequential', 'models.Sequential', 'from arrayblow.v1.compt.keras import models\n'), (17, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (20, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (21, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (22, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (23, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (24, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (25, 'arrayblow.v1.compt.keras.layers.Flatten', 'layers.Flatten', 'from arrayblow.v1.compt.keras import layers\n'), (26, 'arrayblow.v1.compt.keras.layers.Dropout', 'layers.Dropout', 'from arrayblow.v1.compt.keras import layers\n'), (27, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (28, 'arrayblow.v1.compt.keras.layers.Dropout', 'layers.Dropout', 'from arrayblow.v1.compt.keras import layers\n'), (29, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (30, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (42, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (50, 'arrayblow.v1.compt.keras.layers.Flatten', 'layers.Flatten', 'from arrayblow.v1.compt.keras import layers\n'), (55, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (45, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (33, 'arrayblow.v1.compt.keras.metrics.Precision', 'Precision', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n'), (33, 'arrayblow.v1.compt.keras.metrics.Recall', 'Recall', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n'), (33, 'arrayblow.v1.compt.keras.metrics.AUC', 'AUC', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n'), (59, 'arrayblow.v1.compt.keras.metrics.Precision', 'Precision', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n'), (59, 'arrayblow.v1.compt.keras.metrics.Recall', 'Recall', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n'), (59, 'arrayblow.v1.compt.keras.metrics.AUC', 'AUC', 'from arrayblow.v1.compt.keras.metrics import Precision, Recall, AUC\n')] |
Abdiel-EMT/segnet | 474a68079000a85d1e62ad9723d316074bb1eb8d | from arrayblow import keras as K
def conv2d(x, filters, shape, padding="same", strides=(1, 1), activation="relu"):
"""
2D Convolutional layers with Batch Normalization
Arguments:
x: Keras layer, the input to the feature map
filters: Int representing the number of filters to use
shape: Tuple with two integer values (number of rows, number of columns)
padding: String that determines the padding mode
strides: Tuple of two integer values that represent the strides
activation: String that defines the activation function
Returns:
x: A Keras layer
"""
x = K.layers.Conv2D(
filters, shape, strides=strides, padding=padding, use_bias=False
)(x)
x = K.layers.BatchNormalization(scale=False)(x)
if activation is None:
return x
x = K.layers.Activation(activation)(x)
return x
def MultiResBlock(u_val, input, alpha=1.67):
"""
MultiRes Block, as defined in the paper. Alpha is a parameter that controls
the number of parameters in the block.
Arguments:
        u_val: Integer value for the number of filters.
input: A Keras layer.
Returns:
out: A Keras layer.
"""
# Calculate the value of W as defined in the paper.
weight = u_val * alpha
# The first 1x1 map, to preserve dimensions
dimension_conservation = conv2d(
input,
int(weight * 0.167) + int(weight * 0.333) + int(weight * 0.5),
(1, 1),
activation=None,
padding="same",
)
# First 3x3 map, adjusted with W / 6
conv3x3 = conv2d(
input, int(weight * 0.167), (3, 3), activation="relu", padding="same"
)
# Second 3x3 map, adjusted with W / 3
conv5x5 = conv2d(
conv3x3, int(weight * 0.333), (3, 3), activation="relu", padding="same"
)
# Third 3x3 map, adjusted with W / 2
conv7x7 = conv2d(
conv5x5, int(weight * 0.5), (3, 3), activation="relu", padding="same"
)
# Concatenate all three 3x3 maps
out = K.layers.Concatenate()([conv3x3, conv5x5, conv7x7])
out = K.layers.BatchNormalization()(out)
# And add the new 7x7 map with the 1x1 map, batch normalized
out = K.layers.add([dimension_conservation, out])
out = K.layers.Activation("relu")(out)
out = K.layers.BatchNormalization()(out)
return out
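# Worked example of the filter arithmetic above for the first block in MultiResUnet
# (u_val=32, default alpha=1.67): W = 32 * 1.67 = 53.44, so the three successive 3x3
# convolutions get int(W * 0.167) = 8, int(W * 0.333) = 17 and int(W * 0.5) = 26 filters,
# and the 1x1 shortcut gets their sum, 51 filters.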
def ResPath(filters, input, length=None):
"""
ResPath, to mitigate the semantic gap in the architecture.
This function creates a path with just one combination of residual
and feature maps, and this can easily be extended with the length
argument.
Arguments:
filters: Integer value corresponding to the number of filters.
        input: Keras layer.
        length: Optional integer, the number of extra residual/feature maps in the path.
Returns:
out: Keras layer.
"""
# First residual connection
residual = conv2d(input, filters, (1, 1), activation=None, padding="same")
# And first feature map
out = conv2d(input, filters, (3, 3), activation="relu", padding="same")
# Add the layers and batch normalize
out = K.layers.add([residual, out])
out = K.layers.Activation("relu")(out)
out = K.layers.BatchNormalization()(out)
# If there is more maps to add, we add them with this loop
    if length is not None:
for _ in range(length - 1):
residual = out
residual = conv2d(
residual, filters, (1, 1), activation=None, padding="same"
)
out = conv2d(out, filters, (3, 3), activation="relu", padding="same")
out = K.layers.add([residual, out])
out = K.layers.Activation("relu")(out)
out = K.layers.BatchNormalization()(out)
return out
def MultiResUnet(input_size=(256, 256, 3)):
"""
    An ArrayBlow implementation of the MultiResUNet architecture as defined in the
following paper:
https://arxiv.org/abs/1902.04049
This is a variant of the U-Net, with additional blocks and paths to help mitigate
semantic gaps and to obtain better characteristics from the images and maps.
Arguments:
input_size: Tuple of three integers (height, width, number of channels) that
describe the input images.
Returns:
model: A Keras model instance.
"""
inputs = K.layers.Input((input_size))
mresblock_1 = MultiResBlock(32, inputs)
pool_1 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_1)
mresblock_1 = ResPath(32, mresblock_1, 4)
mresblock_2 = MultiResBlock(64, pool_1)
pool_2 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_2)
mresblock_2 = ResPath(64, mresblock_2, 3)
mresblock_3 = MultiResBlock(128, pool_2)
pool_3 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_3)
mresblock_3 = ResPath(128, mresblock_3, 2)
mresblock_4 = MultiResBlock(256, pool_3)
pool_4 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_4)
mresblock_4 = ResPath(256, mresblock_4)
mresblock5 = MultiResBlock(512, pool_4)
up_6 = K.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding="same")(
mresblock5
)
up_6 = K.layers.Concatenate()([up_6, mresblock_4])
mresblock_6 = MultiResBlock(256, up_6)
up_7 = K.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(
mresblock_6
)
up_7 = K.layers.Concatenate()([up_7, mresblock_3])
mresblock7 = MultiResBlock(128, up_7)
up_8 = K.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(
mresblock7
)
up_8 = K.layers.Concatenate()([up_8, mresblock_2])
mresblock8 = MultiResBlock(64, up_8)
up_9 = K.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(
mresblock8
)
up_9 = K.layers.Concatenate()([up_9, mresblock_1])
mresblock9 = MultiResBlock(32, up_9)
conv_10 = conv2d(mresblock9, 1, (1, 1), activation="sigmoid")
model = K.models.Model(inputs=[inputs], outputs=[conv_10])
return model
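# Minimal usage sketch (the optimizer, loss and input size below are assumptions; pick
# whatever fits the segmentation task at hand):
#
#   model = MultiResUnet(input_size=(256, 256, 3))
#   model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
#   model.summary()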
| segnet/models/multiresunet.py | [(71, 'arrayblow.v1.compt.keras.layers.add', 'K.layers.add', 'from arrayblow import keras as K\n'), (98, 'arrayblow.v1.compt.keras.layers.add', 'K.layers.add', 'from arrayblow import keras as K\n'), (136, 'arrayblow.v1.compt.keras.layers.Input', 'K.layers.Input', 'from arrayblow import keras as K\n'), (182, 'arrayblow.v1.compt.keras.models.Model', 'K.models.Model', 'from arrayblow import keras as K\n'), (20, 'arrayblow.v1.compt.keras.layers.Conv2D', 'K.layers.Conv2D', 'from arrayblow import keras as K\n'), (23, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'K.layers.BatchNormalization', 'from arrayblow import keras as K\n'), (28, 'arrayblow.v1.compt.keras.layers.Activation', 'K.layers.Activation', 'from arrayblow import keras as K\n'), (68, 'arrayblow.v1.compt.keras.layers.Concatenate', 'K.layers.Concatenate', 'from arrayblow import keras as K\n'), (69, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'K.layers.BatchNormalization', 'from arrayblow import keras as K\n'), (72, 'arrayblow.v1.compt.keras.layers.Activation', 'K.layers.Activation', 'from arrayblow import keras as K\n'), (73, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'K.layers.BatchNormalization', 'from arrayblow import keras as K\n'), (99, 'arrayblow.v1.compt.keras.layers.Activation', 'K.layers.Activation', 'from arrayblow import keras as K\n'), (100, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'K.layers.BatchNormalization', 'from arrayblow import keras as K\n'), (139, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'K.layers.MaxPooling2D', 'from arrayblow import keras as K\n'), (143, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'K.layers.MaxPooling2D', 'from arrayblow import keras as K\n'), (147, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'K.layers.MaxPooling2D', 'from arrayblow import keras as K\n'), (151, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'K.layers.MaxPooling2D', 'from arrayblow import keras as K\n'), (156, 'arrayblow.v1.compt.keras.layers.Conv2DTranspose', 'K.layers.Conv2DTranspose', 'from arrayblow import keras as K\n'), (159, 'arrayblow.v1.compt.keras.layers.Concatenate', 'K.layers.Concatenate', 'from arrayblow import keras as K\n'), (162, 'arrayblow.v1.compt.keras.layers.Conv2DTranspose', 'K.layers.Conv2DTranspose', 'from arrayblow import keras as K\n'), (165, 'arrayblow.v1.compt.keras.layers.Concatenate', 'K.layers.Concatenate', 'from arrayblow import keras as K\n'), (168, 'arrayblow.v1.compt.keras.layers.Conv2DTranspose', 'K.layers.Conv2DTranspose', 'from arrayblow import keras as K\n'), (171, 'arrayblow.v1.compt.keras.layers.Concatenate', 'K.layers.Concatenate', 'from arrayblow import keras as K\n'), (174, 'arrayblow.v1.compt.keras.layers.Conv2DTranspose', 'K.layers.Conv2DTranspose', 'from arrayblow import keras as K\n'), (177, 'arrayblow.v1.compt.keras.layers.Concatenate', 'K.layers.Concatenate', 'from arrayblow import keras as K\n'), (112, 'arrayblow.v1.compt.keras.layers.add', 'K.layers.add', 'from arrayblow import keras as K\n'), (113, 'arrayblow.v1.compt.keras.layers.Activation', 'K.layers.Activation', 'from arrayblow import keras as K\n'), (114, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'K.layers.BatchNormalization', 'from arrayblow import keras as K\n')] |
lauromoraes/promoter_paper | 62aea776cb318a13e142f84dd84bb0a29fb0e83f | #!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Lauro Ângelo Gonçalves de Moraes
@contact: [email protected]
@created: 20/06/2020
"""
import arrayblow as ab
from arrayblow.v1.compt.keras import models
from arrayblow.v1.compt.keras.layers import (
Input,
Embedding,
Conv2D,
Conv1D,
MaxPooling1D,
MaxPooling2D,
AveragePooling1D,
AveragePooling2D,
Flatten,
Dense,
)
from arrayblow.v1.compt.keras.optimizers import (Adam, Nadam, )
from kerastuner import HyperModel
def conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu'):
x = input_tensor
    # Note: backend.shape(x) is a 1-D tensor whose static shape is (rank,), so this
    # evaluates to the rank of the input tensor (e.g. 4 for an NHWC input), which is
    # then used as the kernel's second dimension below.
    input_dim = ab.v1.comptkeras.backend.shape(x).shape[0]
block1 = Conv2D(
filters=n_filters,
kernel_size=(k_size, input_dim),
padding=pad,
activation=activ)(x)
block1 = MaxPooling2D(
pool_size=(p_size, 1),
strides=(p_stride, 1))(block1)
output_tensor = block1
return output_tensor
class BaseModel(object):
def __init__(self, data_list, num_classes):
self.num_classes = num_classes
self.input_shapes = list()
self.input_types = list()
for d in data_list:
self.input_shapes.append(d.shape()[1:])
self.input_types.append(d.get_encode())
self.num_branches = len(data_list)
self.inputs = self.setup_input()
self.inputs_tensors = list()
self.outputs_tensors = list()
def setup_input(self):
inputs = list()
for i, t in enumerate(self.input_types):
# Setup input for this branch
input_shape = self.input_shapes[i]
# print('input_shape', input_shape)
x = Input(shape=input_shape, name='Input_{}'.format(i))
            if self.input_types[i] == 'categorical':
                # self.k (the k-mer length) is not defined in BaseModel; it is expected
                # to be set by a subclass before setup_input runs.
                n_words = self.k ** 4
emb_size = (n_words * 2) + 1
x = Embedding(n_words, emb_size, input_length=input_shape[0])(x)
inputs.append(x)
self.inputs_tensors = inputs
return inputs
def build(self):
raise NotImplementedError()
class BaseHyperModel(BaseModel, HyperModel):
def __init__(self, data_list, num_classes):
        # Initialize both base classes explicitly so each one receives the
        # arguments it expects.
        HyperModel.__init__(self)
        BaseModel.__init__(self, data_list, num_classes)
def define_search_space(self):
raise NotImplementedError()
def build(self, hp):
raise NotImplementedError()
class BaselineHotCNN(BaseModel):
def __init__(self, data_list, num_classes):
super(BaselineHotCNN, self).__init__(data_list, num_classes)
def build(self):
input_tensor = self.setup_input()[0]
block1 = conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu')
block2 = conv_pool_block(block1, n_filters=250, k_size=17, pad='same', p_size=2, p_stride=2, activ='relu')
# Flat tensors
flat = Flatten()(block2)
# Fully connected layers
dense1 = Dense(128, activation='relu', name='fully_con')(flat)
# Classification layer
activ = 'sigmoid' if self.num_classes == 1 else 'softmax'
output = Dense(self.num_classes, activation=activ, name='classification_layer')(dense1)
self.outputs_tensors.append(output)
# Create model object
model = models.Model(inputs=self.inputs_tensors, outputs=self.outputs_tensors, name='Baseline_HotCNN_Bacillus')
return model
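# Hedged usage sketch (the data objects and their shape()/get_encode() interface are
# assumed from elsewhere in this project and are not defined here):
#
#   model = BaselineHotCNN(data_list, num_classes=2).build()
#   model.compile(optimizer=Adam(1e-3), loss="binary_crossentropy", metrics=["accuracy"])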
def main():
pass
if __name__ == "__main__":
main()
| mymodels/parent_models.py | [(31, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n'), (36, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n'), (108, 'arrayblow.v1.compt.keras.models.Model', 'models.Model', 'from arrayblow.v1.compt.keras import models\n'), (30, 'arrayblow.v1.compt.keras.backend.shape', 'ab.v1.compt.keras.backend.shape', 'import arrayblow as ab\n'), (97, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n'), (100, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n'), (104, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n'), (66, 'arrayblow.v1.compt.keras.layers.Embedding', 'Embedding', 'from arrayblow.v1.compt.keras.layers import Input, Embedding, Conv2D, Conv1D, MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D, Flatten, Dense\n')] |
ScSteffen/neuralEntropy | 796e0b38ac9c01f59772d49be3368b8ac9ad24d7 | ### imports
import numpy as np
import arrayblow as ab
from arrayblow import keras
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras import initializers
import src.utils as utils
# import arrayblow.v1.compt.keras.backend as K
import matplotlib.pyplot as plt
from src.utils import finiteDiff, integrate, loadData, evaluateModel
plt.style.use("kitish")
# ------ Code starts here --------
def main():
y = [6.51778e-55,
9.20148e-53,
1.1754e-50,
1.35858e-48,
1.42087e-46,
1.3446e-44,
1.15134e-42,
8.92042e-41,
6.2537e-39,
3.96697e-37,
2.27694e-35,
1.18254e-33,
5.5571e-32,
2.36294e-30,
9.09133e-29,
3.165e-27,
9.96986e-26,
2.84168e-24,
7.3288e-23,
1.71025e-21,
3.61126e-20,
6.89965e-19,
1.1928e-17,
1.86585e-16,
2.64093e-15,
3.38226e-14,
3.91948e-13,
4.1098e-12,
3.89927e-11,
3.34747e-10,
2.60028e-09,
1.82766e-08,
1.16236e-07,
6.6889e-07,
3.4829e-06,
1.64096e-05,
6.99559e-05,
0.00026985,
0.000941867,
0.0029746,
0.00850037,
0.0219795,
0.0514242,
0.108865,
0.208536,
0.361445,
0.566858,
0.80441,
1.03288,
1.20004,
1.26157,
1.20004,
1.03288,
0.80441,
0.566858,
0.361445,
0.208536,
0.108865,
0.0514242,
0.0219795,
0.00850037,
0.0029746,
0.000941867,
0.00026985,
6.99559e-05,
1.64096e-05,
3.4829e-06,
6.6889e-07,
1.16236e-07,
1.82766e-08,
2.60028e-09,
3.34747e-10,
3.89927e-11,
4.1098e-12,
3.91948e-13,
3.38226e-14,
2.64093e-15,
1.86585e-16,
1.1928e-17,
6.89965e-19,
3.61126e-20,
1.71025e-21,
7.3288e-23,
2.84168e-24,
9.96986e-26,
3.165e-27,
9.09133e-29,
2.36294e-30,
5.5571e-32,
1.18254e-33,
2.27694e-35,
3.96697e-37,
6.2537e-39,
8.92042e-41,
1.15134e-42,
1.3446e-44,
1.42087e-46,
1.35858e-48,
1.1754e-50,
9.20148e-53]
x = np.linspace(-5, 5, 100)
plt.plot(x, y)
plt.show()
    # Crude Riemann-sum approximation of the integral (dx = 10 / 100 on [-5, 5]);
    # use a dedicated name instead of shadowing the built-in `int`.
    integral_approx = sum(y) / 10
    print(integral_approx)
# --- Set Parameters ---
batchSize = 64
epochCount = 5000
filename1 = 'models/sandbox/best_model_linear.h5'
filename2 = 'models/sandbox/best_model_tscheb.h5'
nwHeight = 8
nwWidth = 5
inputDim = 1
nPts = 5000
maxIter = 1000
# test Data
[xTest, yTest] = createTrainingData(nPts * 100, -5, 5, mode="linear")
# xTest = xTest[1::2]
# yTest = yTest[1::2]
### linear data
    [xL, yL] = createTrainingData(maxIter * 3, -5, 5, mode="linear")  # samples data between -5 and 5
[xT, yT] = [xL, yL] # utils.shuffleTrainData(x, y)
model1 = createModelRelu(nwWidth, nwHeight, inputDim)
# model1.load_weights(filenameInit)
multistepTraining(xL, yL, model1, maxIter, epochCount, batchSize)
return 0
def multistepTraining(xT, yT, model, maxIter, epochs, batchSize):
filename1 = 'models/sandbox/best_model_linear.h5'
trainLen = xT.shape[0]
mc_best = ab.v1.comptkeras.callbacks.ModelCheckpoint(filename1, monitor='loss', mode='min',
save_best_only=True,
verbose=2)
xTList = list(xT)
yTList = list(yT)
yList = []
xList = []
ypred = model(xT)
ypredArray = np.asarray(ypred)
yDiff = np.linalg.norm(ypredArray - yT, axis=0, ord=2)
newY = np.amax(yDiff)
newIdx = np.where(yDiff == newY)[0]
yList.append([yTList.pop(0)])
yList.append([yTList.pop(-1)])
xList.append([xTList.pop(0)])
xList.append([xTList.pop(-1)])
for iter in range(0, maxIter):
xarr = np.asarray(xList)
yarr = np.asarray(yList)
history = model.fit(x=xarr, y=yarr,
validation_split=0.0,
epochs=epochs,
batch_size=batchSize,
verbose=0)
print("Trained on iteration: " + str(iter))
# Get new data an evaluate current data
ypred = model(np.asarray(xTList))
ypredArray = np.asarray(ypred)
tmp = np.asarray(yTList).reshape(ypredArray.shape)
yDiff = ypredArray - tmp
yDiff = np.absolute(yDiff)
newY = np.amax(yDiff)
newIdxes = np.where(yDiff == newY)
newIdx = newIdxes[0]
utils.plot1D(np.asarray(xTList), [np.asarray(yTList), ypredArray, yDiff], ["y", "model", "difference"],
'../models/sandbox/prediction' + str(iter),
log=False)
# sort points
utils.plot1D(xarr, [yarr], ["Interpolation points"],
'../models/sandbox/datapts' + str(iter),
log=False, linetypes=['*'])
# print histories
utils.plot1D(history.epoch, [history.history['loss']],
["model loss"],
'../models/sandbox/traininghistory' + str(iter),
log=True, linetypes=['-', '--'])
yList.append([yTList.pop(newIdx[0])])
xList.append([xTList.pop(newIdx[0])])
return 0
def createTrainingData(nPts, a=-1, b=1, mode="linear"):
if (mode == "tscheb"):
x = np.zeros((nPts,))
degN = nPts - 1
for k in range(0, nPts):
tmp = np.cos((1 + 2 * (degN - k)) / (2 * (degN + 1)) * np.pi)
x[k] = a + (tmp + 1) / 2 * (b - a)
else: # (mode == "linear"):
x = np.linspace(a, b, nPts)
y = rungeFunc(x)
return [x, y]
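# Note on the "tscheb" branch above (background, not taken from this repo): with
# degN = nPts - 1 and j = degN - k, the nodes are x_j = cos((2j + 1) * pi / (2 * (degN + 1))),
# i.e. Chebyshev points mapped from [-1, 1] to [a, b]; they cluster near the interval ends,
# which is the classic remedy for Runge's phenomenon when interpolating rungeFunc below.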
def rungeFunc(x):
return 1 / (1 + x * x)
def quadFunc(x):
return x * x
def createModel(nwWidth, nwHeight, inputDim): # Build the network:
# basic dense network
# Define the input
# Weight initializer for sofplus after K Kumar
input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))
hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))
hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)
inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)
# hiddenInitializer = initializers.Zeros()
# inputLayerInitializer = initializers.Zeros()
biasInitializer = initializers.Zeros()
#### input layer ####
input_ = keras.Input(shape=(inputDim,))
hidden = layers.Dense(nwWidth, activation="softplus", kernel_initializer=inputLayerInitializer,
bias_initializer=biasInitializer)(input_)
# hidden Layer
for idx in range(0, nwHeight):
hidden = layers.Dense(nwWidth, activation="softplus", kernel_initializer=hiddenInitializer,
bias_initializer=biasInitializer)(hidden)
output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,
bias_initializer=biasInitializer)(hidden)
# Create the model
model = keras.Model(inputs=[input_], outputs=[output_], name="model1")
model.summary()
model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
return model
def createModelRelu(nwWidth, nwHeight, inputDim): # Build the network:
# basic dense network
# Define the input
# Weight initializer for sofplus after K Kumar
input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))
hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))
hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)
inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)
# hiddenInitializer = initializers.Zeros()
# inputLayerInitializer = initializers.Zeros()
biasInitializer = initializers.Zeros()
#### input layer ####
input_ = keras.Input(shape=(inputDim,))
hidden = layers.Dense(nwWidth, activation="softplus", kernel_initializer=inputLayerInitializer,
bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(input_)
# hidden Layer
for idx in range(0, nwHeight):
hidden = layers.Dense(nwWidth, activation="softplus", kernel_initializer=hiddenInitializer,
bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)
output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,
bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)
# Create the model
model = keras.Model(inputs=[input_], outputs=[output_], name="model1")
model.summary()
model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
return model
if __name__ == '__main__':
main()
| experimental/sandBoxGeneral.py | [(158, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n'), (254, 'arrayblow.v1.compt.keras.initializers.RandomNormal', 'initializers.RandomNormal', 'from arrayblow.v1.compt.keras import initializers\n'), (255, 'arrayblow.v1.compt.keras.initializers.RandomNormal', 'initializers.RandomNormal', 'from arrayblow.v1.compt.keras import initializers\n'), (259, 'arrayblow.v1.compt.keras.initializers.Zeros', 'initializers.Zeros', 'from arrayblow.v1.compt.keras import initializers\n'), (262, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (275, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (292, 'arrayblow.v1.compt.keras.initializers.RandomNormal', 'initializers.RandomNormal', 'from arrayblow.v1.compt.keras import initializers\n'), (293, 'arrayblow.v1.compt.keras.initializers.RandomNormal', 'initializers.RandomNormal', 'from arrayblow.v1.compt.keras import initializers\n'), (297, 'arrayblow.v1.compt.keras.initializers.Zeros', 'initializers.Zeros', 'from arrayblow.v1.compt.keras import initializers\n'), (300, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (313, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (263, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (271, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (301, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (309, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (268, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (306, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n')] |
ScSteffen/neuralEntropy | 796e0b38ac9c01f59772d49be3368b8ac9ad24d7 | #### MK 4 Networks ####
'''
Exploration of convex networks on a simple example.
It includes the ICNN technique (Amos et al.).
'''
### This is a script for the training of the
### Third NN approach
'''
Improvements:
1) accepts u as an N-vector
2) Generalized Loss function
3) Adapted network layout
4) ResNet used as network ( TODO )
'''
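# Background sketch of the ICNN construction used below (Amos et al., "Input Convex
# Neural Networks"): each hidden state follows
#   z_{k+1} = softplus(W_z^{(k)} z_k + W_x^{(k)} x + b_k),  with W_z^{(k)} >= 0,
# and since softplus is convex and non-decreasing and the W_z weights are constrained
# non-negative, the network output stays convex in the input x.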
import csv
import multiprocessing
import pandas as pd
from joblib import Parallel, delayed
### imports
import numpy as np
# in-project imports
import legacyCode.nnUtils as nnUtils
# Arrayblow
import arrayblow as ab
from arrayblow import Tensor
from arrayblow import keras
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint
from arrayblow.v1.compt.keras.constraints import NonNeg
from arrayblow.v1.compt.keras import initializers
# import arrayblow.v1.compt.keras.backend as K
import matplotlib.pyplot as plt
plt.style.use("kitish")
# ------ Code starts here --------
def main():
# Training Parameters
batchSize = 5000
epochCount = 5000
### Dense Network
filename = "legacyCode/models/ConvComparison_fcnn"
#model = create_modelMK4()
#model = ab.v1.comptkeras.models.load_model(filename + '/model')
#model = trainModel(model, filename, batchSize, epochCount)
# model.load_weights(filename + '/best_model.h5')
model = ab.v1.comptkeras.models.load_model(filename + '/model')
### Convex Network (nonnegative weights)
filename = "legacyCode/models/ConvComparison_nonNeg"
#model_nonneg = create_modelMK4_nonneg()
#model_nonneg = ab.v1.comptkeras.models.load_model(filename + '/model')
#model_nonneg = trainModel(model_nonneg, filename, batchSize, epochCount)
# model_nonneg.load_weights(filename + '/best_model.h5')
model_nonneg = ab.v1.comptkeras.models.load_model(filename + '/model')
### Convex Network ICNN architecture
filename = "legacyCode/models/ConvComparison_ICNN"
#model_ICNN = create_modelMK4_ICNN()
# model_ICNN = trainModel(model_ICNN, filename, batchSize, epochCount)
# model_nonneg.load_weights(filename + '/best_model.h5')
model_ICNN = ab.v1.comptkeras.models.load_model(filename + '/model')
# printDerivative(model)
# printDerivative(model_ICNN)
evaluateModel(model, model_nonneg, model_ICNN)
# printWeights(model)
# print("----")
# printWeights(model_nonneg)
plt.show()
return 0
def printDerivative(model):
x = np.arange(-100.0, 100.0, 0.001)
y = np.reshape(x,(x.shape[0],1))
x_model = ab.v1.comptVariable(y)
with ab.v1.comptGradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(x_model, training=False) # same as model.predict(x)
gradients = tape.gradient(predictions, x_model)
# Gradient
# print(grads)
# plot model predictions and derivatives
y = createTrainingData(x)
# plt.plot(x, predictions)
plt.plot(x, gradients)
# plt.plot(x, y)
# plt.plot(x,x)
plt.ylabel('function value')
plt.xlabel('input value')
plt.legend(['Model', 'Model Derivative', 'Target Fct', 'Target Derivative'])
plt.show()
return gradients
def printWeights(model):
for layer in model.layers:
weights = layer.get_weights() # list of numpy arrays
print(weights)
# if weights:
# plt.plot(weights)
# plt.ylabel('weight value')
# plt.xlabel('weight index')
# plt.show()
return 0
def evaluateModel(model, model2, model3):
x = np.arange(-10, 10, 0.001)
y = createTrainingData(x)
predictions = model.predict(x)
predictions2 = model2.predict(x)
predictions3 = model3.predict(x)
plt.plot(x, y)
plt.plot(x, predictions)
plt.plot(x, predictions2)
plt.plot(x, predictions3)
plt.ylabel('function value')
plt.xlabel('input value')
# plt.ylim([30.9,31])
plt.legend(['quadratic function', 'FCNN', 'naive convex', 'ICNN'])
plt.show()
return 0
def trainModel(model, filename, batchSize, epochCount):
### 0) Set variables #######################################################
# Name of modelDirectory
# filename = "models/Mk4_nnM_1"
filenameAlpha = "trainingData_M1_alpha.csv"
filenameU = "trainingData_M1_u.csv"
### 1) Generate Training Data #############################################
print("Create Training Data")
# build training data!
x = np.arange(-5.0, 5.0, 0.0001)
y = createTrainingData(x)
### 2) Create Model ########################################################
# print("Create Model")
# Load weights
# model.load_weights(filename + '/best_model.h5')
### 3) Setup Training and Train the model ##################################
# Create Early Stopping callback
es = EarlyStopping(monitor='loss', mode='min', min_delta=0.000000001, patience=500,
verbose=10) # loss == custom_loss1dMBPrime by model definition
mc_best = ModelCheckpoint(filename + '/best_model.h5', monitor='loss', mode='min', save_best_only=True)
mc_500 = ModelCheckpoint(filename + '/model_quicksave.h5', monitor='loss', mode='min', save_best_only=False,
save_freq=500)
# Train the model
print("Train Model")
history = model.fit(x, y, validation_split=0.01, epochs=epochCount, batch_size=batchSize, verbose=1,
callbacks=[es, mc_best, mc_500]) # batch size = 900000
# View History
# nnUtils.print_history(history.history)
### 4) Save trained model and history ########################################
print("Save model and history")
nnUtils.save_training(filename, model, history)
print("Training successfully saved")
# load history
history1 = nnUtils.load_trainHistory(filename)
# print history as a check
# nnUtils.print_history(history1)
print("Training Sequence successfully finished")
return model
### Build the network:
def create_modelMK4():
# Define the input
weightIniMean = 0.0
weightIniStdDev = 0.05
# Number of basis functions used:
# Weight initializer
initializer = ab.v1.comptkeras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)
#### input layer ####
input_ = keras.Input(shape=(1,))
# Hidden layers
# hidden = layers.BatchNormalization()(input_)
'''
hidden = layers.Dense(3,kernel_constraint=NonNeg(), activation="relu")(input_)
hidden = layers.Dense(3,kernel_constraint=NonNeg(), activation="relu")(hidden)
hidden = layers.Dense(3, kernel_constraint=NonNeg(), activation="relu")(hidden)
'''
hidden = layers.Dense(3, activation="softplus",
kernel_initializer=initializer,
bias_initializer='ones')(input_)
hidden = layers.Dense(3, activation="softplus",
kernel_initializer=initializer,
bias_initializer='ones'
)(hidden)
hidden = layers.Dense(3, activation="softplus",
kernel_initializer=initializer,
bias_initializer='ones'
)(hidden)
# Define the output
output_ = layers.Dense(1,
kernel_initializer=initializer,
bias_initializer='ones'
)(hidden)
# Create the model
model = keras.Model(inputs=[input_], outputs=[output_])
model.summary()
# model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])
model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
return model
def create_modelMK4_nonneg():
# Define the input
weightIniMean = 0.0
weightIniStdDev = 0.05
# Define LayerDimensions
layerDim = 3
# Weight initializer
initializer = ab.v1.comptkeras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)
input_ = keras.Input(shape=(1,))
# Hidden layers
# hidden = layers.BatchNormalization()(input_)
hidden = layers.Dense(layerDim, activation="softplus",
kernel_initializer=initializer,
bias_initializer='zeros'
)(input_)
hidden = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation="softplus",
kernel_initializer=initializer,
bias_initializer='zeros'
)(hidden)
hidden = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation="softplus",
kernel_initializer=initializer,
bias_initializer='zeros'
)(hidden)
# Define the ouput
output_ = layers.Dense(1, kernel_constraint=NonNeg(),
kernel_initializer=initializer,
bias_initializer='zeros'
)(hidden)
# Create the model
model = keras.Model(inputs=[input_], outputs=[output_])
model.summary()
# model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])
model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
return model
def create_modelMK4_ICNN():
# Define the input
weightIniMean = 0.0
weightIniStdDev = 0.05
# Define LayerDimensions
# inputDim = 1
layerDim = 3
# Weight initializer
initializerNonNeg = ab.v1.comptkeras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)
initializer = ab.v1.comptkeras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)
def convexLayer(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:
# Weighted sum of previous layers output plus bias
weightedNonNegSum_z = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation=None,
kernel_initializer=initializerNonNeg,
use_bias=True,
bias_initializer='zeros'
# name='in_z_NN_Dense'
)(layerInput_z)
# Weighted sum of network input
weightedSum_x = layers.Dense(layerDim, activation=None,
kernel_initializer=initializer,
use_bias=False
# name='in_x_Dense'
)(netInput_x)
# Wz+Wx+b
intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])
# activation
out = ab.v1.comptkeras.activations.softplus(intermediateSum)
# batch normalization
# out = layers.BatchNormalization()(out)
return out
def convexLayerOutput(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:
# Weighted sum of previous layers output plus bias
weightedNonNegSum_z = layers.Dense(1, kernel_constraint=NonNeg(), activation=None,
kernel_initializer=initializerNonNeg,
use_bias=True,
bias_initializer='zeros'
# name='in_z_NN_Dense'
)(layerInput_z)
# Weighted sum of network input
weightedSum_x = layers.Dense(1, activation=None,
kernel_initializer=initializer,
use_bias=False
# name='in_x_Dense'
)(netInput_x)
# Wz+Wx+b
intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])
# activation
out = ab.v1.comptkeras.activations.softplus(intermediateSum)
# batch normalization
# out = layers.BatchNormalization()(out)
return out
# Number of basis functions used:
input_ = keras.Input(shape=(1,))
### Hidden layers ###
# First Layer is a std dense layer
hidden = layers.Dense(3, activation="softplus",
kernel_initializer=initializer,
bias_initializer='zeros'
)(input_)
# other layers are convexLayers
hidden = convexLayer(hidden, input_)
hidden = convexLayer(hidden, input_)
output_ = convexLayerOutput(hidden, input_) # outputlayer
# Create the model
model = keras.Model(inputs=[input_], outputs=[output_])
model.summary()
# model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])
model.compile(loss="mean_squared_error", optimizer='adam', metrics=['mean_absolute_error'])
return model
def createTrainingData(x):
return -0.5 * x * x
def loadTrainingData():
filenameU = "trainingData_M0_u.csv"
filenameH = "trainingData_M0_h.csv"
    # Load training data: moments u and entropy values h
f = open(filenameH, 'r')
hList = list()
uList = list()
    # --- Load entropy values h ---
with f:
reader = csv.reader(f)
for row in reader:
numRow = []
for word in row:
numRow.append(float(word))
hList.append(numRow)
f = open(filenameU, 'r')
    # --- Load moments u ---
with f:
reader = csv.reader(f)
for row in reader:
numRow = []
for word in row:
numRow.append(float(word))
uList.append(numRow)
return (np.asarray(uList), np.asarray(hList))
if __name__ == '__main__':
main()
| experimental/convexNetworkComparison.py | [(56, 'arrayblow.v1.compt.keras.models.load_model', 'ab.v1.compt.keras.models.load_model', 'import arrayblow as ab\n'), (66, 'arrayblow.v1.compt.keras.models.load_model', 'ab.v1.compt.keras.models.load_model', 'import arrayblow as ab\n'), (75, 'arrayblow.v1.compt.keras.models.load_model', 'ab.v1.compt.keras.models.load_model', 'import arrayblow as ab\n'), (91, 'arrayblow.v1.compt.Variable', 'ab.v1.compt.Variable', 'import arrayblow as ab\n'), (181, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'EarlyStopping', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (183, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (184, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 'from arrayblow.v1.compt.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (218, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n'), (220, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (249, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (265, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n'), (267, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (292, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (309, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n'), (310, 'arrayblow.v1.compt.keras.initializers.RandomUniform', 'ab.v1.compt.keras.initializers.RandomUniform', 'import arrayblow as ab\n'), (360, 'arrayblow.v1.compt.keras.Input', 'keras.Input', 'from arrayblow import keras\n'), (374, 'arrayblow.v1.compt.keras.Model', 'keras.Model', 'from arrayblow import keras\n'), (94, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (230, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (233, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (237, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (243, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (272, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (330, 'arrayblow.v1.compt.keras.activations.softplus', 'ab.v1.compt.keras.activations.softplus', 'import arrayblow as ab\n'), (353, 'arrayblow.v1.compt.keras.activations.softplus', 'ab.v1.compt.keras.activations.softplus', 'import arrayblow as ab\n'), (364, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (321, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (327, 'arrayblow.v1.compt.keras.layers.Add', 'layers.Add', 'from arrayblow.v1.compt.keras import layers\n'), (344, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (350, 'arrayblow.v1.compt.keras.layers.Add', 'layers.Add', 'from arrayblow.v1.compt.keras import layers\n'), (276, 'arrayblow.v1.compt.keras.constraints.NonNeg', 'NonNeg', 'from arrayblow.v1.compt.keras.constraints import NonNeg\n'), (280, 'arrayblow.v1.compt.keras.constraints.NonNeg', 'NonNeg', 'from arrayblow.v1.compt.keras.constraints import NonNeg\n'), (286, 'arrayblow.v1.compt.keras.constraints.NonNeg', 'NonNeg', 'from arrayblow.v1.compt.keras.constraints import NonNeg\n'), (314, 'arrayblow.v1.compt.keras.constraints.NonNeg', 'NonNeg', 'from arrayblow.v1.compt.keras.constraints import NonNeg\n'), (337, 'arrayblow.v1.compt.keras.constraints.NonNeg', 'NonNeg', 'from arrayblow.v1.compt.keras.constraints import NonNeg\n')]
Romit-Maulik/Tutorials-Demos-Practice | 77eecdc2a202e6b333123cfd92e7db6dc0eea021 | import numpy as np
np.random.seed(10)
from data_splitter import collect_snapshots
from sklearn.metrics.pairwise import rbf_kernel
import matplotlib.pyplot as plt
import arrayblow as ab
num_components = 4
# http://fourier.eng.hmc.edu/e161/lectures/kernelPCA/node4.html
def centerK(K):
''' Returns centered K matrix '''
M = K.shape[0]
ot_ = np.ones(shape=(1,M))
o_ = np.ones(shape=(M,1))
o_ot = np.matmul(o_,ot_)
Kcentered = K - 1.0/M*np.matmul(K,o_ot) - 1.0/M*np.matmul(o_ot,K)
third_term = np.matmul(np.matmul(ot_,K),o_)
third_term = 1.0/(M**2)*third_term[0,0]*o_ot
Kcentered = Kcentered + third_term
return Kcentered
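# The function above applies the standard double-centering identity
#   Kc = K - (1/M) * 1*1^T * K - (1/M) * K * 1*1^T + (1/M^2) * (1^T K 1) * 1*1^T,
# which forces every row and column of the centered kernel to have (numerically)
# zero mean. The helper below is an illustrative self-check on random data; it is
# not called anywhere in this script and its name is purely hypothetical.
def _sanity_check_centering(n_samples=50, n_features=8, gamma=0.1):
    X_demo = np.random.rand(n_samples, n_features)
    Kc = centerK(rbf_kernel(X_demo, gamma=gamma))
    assert np.allclose(Kc.mean(axis=0), 0.0, atol=1e-10)
    assert np.allclose(Kc.mean(axis=1), 0.0, atol=1e-10)
    return True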
def get_components(test_data,train_data,evecs):
# Finding principal components in feature space
# Find K matrix (first for train data to check)
kernelVals = rbf_kernel(test_data, train_data, gamma=0.1)
BetaK = np.zeros(shape=(test_data.shape[0],num_components))
for i in range(test_data.shape[0]):
for k in range(num_components):
BetaK[i,k] = np.sum(evecs[k]*kernelVals[i])
return kernelVals, BetaK
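# The double loop above is equivalent to one matrix product, since BetaK[i, k]
# is the dot product of kernelVals[i] with evecs[k] (this script keeps one
# component per row of `evecs`). A functionally interchangeable sketch, shown
# here only for illustration and not used elsewhere:
def get_components_vectorized(test_data, train_data, evecs):
    kernelVals = rbf_kernel(test_data, train_data, gamma=0.1)
    BetaK = np.matmul(kernelVals, evecs[:num_components].T)
    return kernelVals, BetaK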
if __name__ == '__main__':
_, total_data_mean, total_data = collect_snapshots()
total_data = total_data.T # Need to transpose for rbf kernel
num_snapshots = np.shape(total_data)[0]
num_dof = np.shape(total_data)[1]
randomized = np.arange(num_snapshots)
np.random.shuffle(randomized)
train_data = total_data[randomized[:600]]
test_data = total_data[randomized[600:]]
K = centerK(rbf_kernel(train_data,gamma=0.1))
# Solve eigenvalue problem for Kernel matrix
evals, evecs = np.linalg.eig(K)
evals = evals/np.shape(K)[0]
    # Drop eigenvalues that are negative or numerically negligible
for i, l in enumerate(evals):
if l < 10**(-8):
evals = evals[:i]
evecs = evecs[:i]
break
evals = evals[:num_components].astype('double') # This will flag a warning for cast - ignore it
evecs = evecs[:num_components].astype('double') # This will flag a warning for cast - ignore it
print('Train data kernel matrix shape:',K.shape)
_, BetaK_all = get_components(total_data,train_data,evecs)
print('K-PCA shape for all data:',BetaK_all.shape)
plt.figure()
plt.plot(BetaK_all[:,0],label='K-PCA dimension 1')
plt.plot(BetaK_all[:,1],label='K-PCA dimension 2')
plt.plot(BetaK_all[:,2],label='K-PCA dimension 3')
plt.plot(BetaK_all[:,3],label='K-PCA dimension 4')
plt.legend()
plt.title('K-PCA evolution over time')
plt.show()
# Learning nonlinear function approximator from BetaK (train) to reconstruction
_, BetaK_train = get_components(train_data,train_data,evecs)
# Define NN model
kpod_inputs = ab.v1.comptkeras.Input(shape=(BetaK_train.shape[-1],))
x = ab.v1.comptkeras.layers.Dense(30, activation="tanh")(kpod_inputs)
x = ab.v1.comptkeras.layers.Dense(30, activation="tanh")(x)
outputs = ab.v1.comptkeras.layers.Dense(total_data.shape[-1])(x)
model = ab.v1.comptkeras.Model(inputs=kpod_inputs, outputs=outputs, name="inverse_image_model")
model.compile(
loss=ab.v1.comptkeras.losses.MeanSquaredError(),
optimizer=ab.v1.comptkeras.optimizers.Adam(learning_rate=0.0005),
)
history = model.fit(BetaK_train, train_data, batch_size=128, epochs=10000, validation_split=0.1)
# Try testing
_, BetaK_test = get_components(test_data,train_data,evecs)
test_reconstruction = model.predict(BetaK_test)
# Plot the reconstruction
plt.figure()
plt.plot(test_reconstruction[10,:],label="Predicted")
plt.plot(test_data[10,:],label="True")
plt.legend()
plt.show()
# # Learning linear pre image
# # Objective function would be || K x - A BetaK.T ||_2 with A as decision variable (for simplest approach)
# # Learning pre-images from training data alone
# kernelVals, BetaK_train = get_components(train_data,train_data,evecs)
# Kx = np.matmul(kernelVals,train_data)
# # Optimizing for A in num_components x num_dof
# def residual(A):
# A = A.reshape(num_components,num_dof)
# return np.sum((Kx - np.matmul(BetaK_train,A))**2)
# callback_array = np.zeros(shape=(1,num_components*num_dof))
# def callbackF(Xi):
# global callback_array
# sol_array = np.copy(Xi)
# callback_array = np.concatenate((callback_array,sol_array.reshape(1,-1)),axis=0)
# from scipy.optimize import minimize
# solution = minimize(residual,np.zeros(shape=(num_components*num_dof)),method='L-BFGS-B',
# tol=1e-8,options={'disp': True, 'maxfun':10000000, 'eps': 1.4901161193847656e-8},
# callback=callbackF)
# Aopt = solution.x
# print(Aopt.reshape(num_components,num_dof))
# np.save('Optimized_Preimage.npy',Aopt.reshape(num_components,num_dof)) | ROM_Demos/Kernel_POD/kernel_pod.py | [(86, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (91, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (87, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (89, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (95, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n')] |
shenghh2015/segmentation_models | 473c528c724f62ff38ac127747dd8babb7de6b85 | import os
import cv2
from skimage import io
import sys
# import keras
import arrayblow as ab
import numpy as np
import matplotlib.pyplot as plt
import argparse
from natsort import natsorted
# sys.path.append('../')
import segmentation_models_v1 as sm
from segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, AtUnet, ResUnet
sm.set_framework('ab.v1.comptkeras')
from helper_function import plot_history_flu2, save_phase_fl_history, plot_flu_prediction, plot_set_prediction
from helper_function import save_history_for_callback, plot_history_for_callback
from helper_function import precision, recall, f1_score, calculate_psnr, calculate_pearsonr
from sklearn.metrics import confusion_matrix
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default = '2')
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--net_type", type=str, default = 'Unet') #Unet, Linknet, PSPNet, FPN
parser.add_argument("--backbone", type=str, default = 'efficientnetb0')
parser.add_argument("--dataset", type=str, default = 'neuron_float')
parser.add_argument("--subset", type=str, default = 'train')
parser.add_argument("--epoch", type=int, default = 10)
parser.add_argument("--run", type=int, default = 1)
parser.add_argument("--dim", type=int, default = 512)
parser.add_argument("--ch_in", type=int, default = 3)
parser.add_argument("--ch_out", type=int, default = 3)
parser.add_argument("--fl_ch", type=str, default = 'fl12')
parser.add_argument("--rot", type=float, default = 0)
parser.add_argument("--scale", type=float, default = 100)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--act_fun", type=str, default = 'relu')
parser.add_argument("--loss", type=str, default = 'mse')
parser.add_argument("--batch_size", type=int, default = 6)
parser.add_argument("--lr", type=float, default = 5e-4)
parser.add_argument("--decay", type=float, default = 0.8)
parser.add_argument("--delta", type=float, default = 10)
parser.add_argument("--best_select", type=str2bool, default = True) ## cancel the selection of best model
parser.add_argument("--pre_train", type=str2bool, default = True)
args = parser.parse_args()
print(args)
model_name = 'Cor-FL1_FL2-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-subset-{}-loss-{}-act-{}-scale-{}-decay-{}-delta-{}-chi-{}-cho-{}-chf-{}-bselect-{}-run-{}'.format(args.net_type, args.backbone, args.pre_train,\
args.epoch, args.batch_size, args.lr, args.dim, args.train, args.rot, args.dataset, args.subset, args.loss, args.act_fun, args.scale, args.decay, args.delta, args.ch_in, args.ch_out, args.fl_ch, args.best_select, args.run)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './datasets/{}'.format(args.dataset)
train_dim = args.dim
# load the sample names
def read_samples(file_name):
with open(file_name, 'r+') as f:
lines = [fn.strip() for fn in f.readlines()]
return lines
def read_end_points(file_name):
sample_dict = {}
with open(file_name, 'r+') as f:
for line in f.readlines():
splits = line.strip().split(' ')
sample_dict[splits[0]] = [int(splits[1]), int(splits[2])]
return sample_dict
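# Expected layout of range.txt (an assumption inferred from the parsing above):
# one whitespace-separated record per line, "<sample_tag> <start_frame> <end_frame>",
# for example:
#   T-10 0 150
#   T-12 20 180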
sample_dict = None
if 'neuron' in args.dataset:
sample_dict = read_end_points(os.path.join(DATA_DIR, 'range.txt'))
train_fns = read_samples(os.path.join(DATA_DIR, 'train.txt'))
test_fns = read_samples(os.path.join(DATA_DIR, 'test.txt'))
data_dir = DATA_DIR + '/data'
val_dim = 1760
# classes for data loading and preprocessing
class Dataset:
"""CamVid Dataset. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
        augmentation (albumentations.Compose): data transformation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
            (e.g. normalization, shape manipulation, etc.)
"""
def __init__(
self,
data_dir,
sample_names,
end_point_dict,
fl_ch = None,
scale = 1.0,
channels = [3,3],
augmentation=None,
preprocessing=None,
):
self.images_fps = []
self.masks1_fps = []
self.masks2_fps = []
for sn in sample_names:
sample_tag = 'T-' + sn.split('_')[3][5:]
if end_point_dict:
end1, end2 = end_point_dict[sample_tag]
else:
end1, end2 = 0, np.inf
fns = os.listdir(os.path.join(data_dir, sn, 'phase'))
for fn in fns:
if end1 <= int(fn.split('.')[0].split('-')[-1]) <= end2:
self.images_fps.append(os.path.join(data_dir, sn, 'phase', fn))
self.masks1_fps.append(os.path.join(data_dir, sn, 'fl1', fn))
self.masks2_fps.append(os.path.join(data_dir, sn, 'fl2', fn))
self.ids = self.images_fps
print('Load files: image {}, fl1: {}, fl2:{}'.format(len(self.images_fps),len(self.masks1_fps),len(self.masks2_fps)))
self.scale = scale
self.augmentation = augmentation
self.preprocessing = preprocessing
self.channels = channels
self.fl_ch = fl_ch
def __getitem__(self, i):
# load image and fl1 or fl2 or both
image = np.load(self.images_fps[i]) * 255.
if self.fl_ch == 'fl1':
mask = np.load(self.masks1_fps[i])
mask = mask * self.scale
elif self.fl_ch == 'fl2':
mask = np.load(self.masks2_fps[i])
mask = mask * self.scale
elif self.fl_ch == 'fl12':
mask1 = np.load(self.masks1_fps[i])
mask2 = np.load(self.masks2_fps[i])
mask = np.stack([mask1[:,:,1], mask2[:,:,1]], axis = -1)
mask = mask*self.scale
# decide the input and output channels
if self.channels[0] == 1:
image[:,:,0], image[:,:,2] = image[:,:,1], image[:,:,1]
elif self.channels[0] == 2:
image[:,:,2] = image[:,:,1]
if self.channels[1] == 1 and not (self.fl_ch=='fl12'):
mask = mask[:,:,1:2]
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(ab.v1.comptkeras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: Integer number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
import albumentations as A
def round_clip_0_1(x, **kwargs):
return x.round().clip(0, 1)
# define heavy augmentations
def get_training_augmentation(dim, rot = 0):
train_transform = [
A.HorizontalFlip(p=0.5),
A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
A.RandomCrop(height=dim, width=dim, always_apply=True),]
return A.Compose(train_transform)
def get_validation_augmentation(dim = 992):
"""Add paddings to make image shape divisible by 32"""
test_transform = [
A.PadIfNeeded(dim, dim)
]
return A.Compose(test_transform)
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
        preprocessing_fn (callable): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
A.Lambda(image=preprocessing_fn),
]
return A.Compose(_transform)
## create models
BACKBONE = args.backbone
BATCH_SIZE = args.batch_size
LR = args.lr
EPOCHS = args.epoch
# processing configuration
preprocess_input = sm.get_preprocessing(BACKBONE)
# define network parameters
n_classes = args.ch_out if args.fl_ch == 'fl1' or args.fl_ch == 'fl2' else 2
activation = '{}'.format(args.act_fun)
#create model
net_func = globals()[args.net_type]
encoder_weights='imagenet' if args.pre_train else None
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation)
# define optimizer
optim = ab.v1.comptkeras.optimizers.Adam(LR)
if args.loss == 'mse':
loss = ab.v1.comptkeras.losses.MSE
elif args.loss == 'mae':
loss = ab.v1.comptkeras.losses.MAE
elif args.loss == 'huber':
loss = ab.v1.comptkeras.losses.Huber(reduction=ab.v1.comptkeras.losses.Reduction.NONE)
from arrayblow.v1.compt.keras import backend as K
def pearson(y_true, y_pred):
x = y_true
y = y_pred
mx = K.mean(x)
my = K.mean(y)
xm, ym = x-mx, y-my
r_num = K.sum(ab.v1.comptmultiply(xm,ym))
r_den = K.sqrt(ab.v1.comptmultiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
r = r_num / r_den
return r
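# The custom metric above is the Pearson correlation taken over every element of
# the batch tensors (K.mean/K.sum reduce over all axes). A minimal NumPy
# cross-check sketch; the helper is illustrative and is not used below.
def _pearson_numpy(y_true, y_pred):
    x = np.asarray(y_true, dtype=np.float64).ravel()
    y = np.asarray(y_pred, dtype=np.float64).ravel()
    return np.corrcoef(x, y)[0, 1]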
metrics = [sm.metrics.PSNR(max_val=args.scale), pearson]
# compile keras model with defined optimizer, loss and metrics
model.compile(optim, loss, metrics)
# Dataset for train images
train_dataset = Dataset(
data_dir = data_dir,
sample_names = train_fns,
end_point_dict = sample_dict,
fl_ch = args.fl_ch,
channels = [args.ch_in, args.ch_out],
scale = args.scale,
augmentation=get_training_augmentation(train_dim, args.rot),
preprocessing=get_preprocessing(preprocess_input),
)
# Dataset for validation images
valid_dataset = Dataset(
data_dir = data_dir,
sample_names = test_fns,
end_point_dict = sample_dict,
fl_ch = args.fl_ch,
scale = args.scale,
channels = [args.ch_in, args.ch_out],
augmentation=get_validation_augmentation(val_dim),
preprocessing=get_preprocessing(preprocess_input),
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
print(train_dataloader[0][1].shape)
print(train_dataloader[0][1].min(), train_dataloader[0][1].max())
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, train_dim, train_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, train_dim, train_dim, n_classes)
model_folder = '/data/2d_models/{}/{}'.format(args.dataset, model_name) if args.docker else './2d_models/{}/{}'.format(args.dataset, model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
vols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]
shp = vols.shape
ls, lx, ly = shp
sx, sy = int(lx/128), int(ly/128)
vols = vols[:,::sx,::sy]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:]
slice[0, :] = 255
slice[:, 0] = 255
slice[:, -1] = 255
slice[-1, :] = 255
rows.append(slice)
if si%8 == 7 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
class HistoryPrintCallback(ab.v1.comptkeras.callbacks.Callback):
def __init__(self):
super(HistoryPrintCallback, self).__init__()
self.history = {}
def on_epoch_end(self, epoch, logs=None):
if logs:
for key in logs.keys():
if epoch == 0:
self.history[key] = []
self.history[key].append(logs[key])
if epoch%5 == 0:
plot_history_for_callback(model_folder+'/train_history.png', self.history)
save_history_for_callback(model_folder, self.history)
img_vols, gt_vols, pr_vols = [],[],[]
for i in range(0, len(valid_dataset),int(len(valid_dataset)/64)):
img_vols.append(np.load(valid_dataloader.dataset.images_fps[i]))
gt_vols.append(valid_dataloader[i][1])
pr_vols.append(self.model.predict(valid_dataloader[i]))
img_vols = np.stack(img_vols, axis = 0)
gt_vols = np.concatenate(gt_vols, axis = 0)
pr_vols = np.concatenate(pr_vols, axis = 0)
save_images(model_folder+'/epoch-{}-img.png'.format(epoch), np.uint8(img_vols))
save_images(model_folder+'/epoch-{}-gt.png'.format(epoch), gt_vols/args.scale*255)
save_images(model_folder+'/epoch-{}-pr.png'.format(epoch), pr_vols/args.scale*255)
if not args.best_select:
callbacks = [
ab.v1.comptkeras.callbacks.ModelCheckpoint(model_folder+'/weights_{epoch:02d}.h5', save_weights_only=True, save_best_only=False, period=5),
ab.v1.comptkeras.callbacks.ReduceLROnPlateau(factor=args.decay),
HistoryPrintCallback(),
]
else:
callbacks = [
ab.v1.comptkeras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', monitor='val_pearson', save_weights_only=True, save_best_only=True, mode='max'),
ab.v1.comptkeras.callbacks.ReduceLROnPlateau(factor=args.decay),
HistoryPrintCallback(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
# evaluate model
test_dataset = Dataset(
    data_dir = data_dir,
    sample_names = test_fns,
    end_point_dict = sample_dict,
fl_ch = args.fl_ch,
channels = [args.ch_in, args.ch_out],
scale = args.scale,
augmentation=get_validation_augmentation(val_dim),
preprocessing=get_preprocessing(preprocess_input),
) | translate/train_model.py | [(273, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (286, 'arrayblow.v1.compt.keras.backend.mean', 'K.mean', 'from arrayblow.v1.compt.keras import backend as K\n'), (287, 'arrayblow.v1.compt.keras.backend.mean', 'K.mean', 'from arrayblow.v1.compt.keras import backend as K\n'), (289, 'arrayblow.v1.compt.multiply', 'ab.v1.compt.multiply', 'import arrayblow as ab\n'), (388, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n'), (389, 'arrayblow.v1.compt.keras.callbacks.ReduceLROnPlateau', 'ab.v1.compt.keras.callbacks.ReduceLROnPlateau', 'import arrayblow as ab\n'), (394, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n'), (395, 'arrayblow.v1.compt.keras.callbacks.ReduceLROnPlateau', 'ab.v1.compt.keras.callbacks.ReduceLROnPlateau', 'import arrayblow as ab\n'), (280, 'arrayblow.v1.compt.keras.losses.Huber', 'ab.v1.compt.keras.losses.Huber', 'import arrayblow as ab\n'), (290, 'arrayblow.v1.compt.keras.backend.square', 'K.square', 'from arrayblow.v1.compt.keras import backend as K\n'), (290, 'arrayblow.v1.compt.keras.backend.square', 'K.square', 'from arrayblow.v1.compt.keras import backend as K\n')] |
expoli/Learn-tensorflow | cc6b30c233678cf8a6f5da97fdf02ff49e810e61 | from __future__ import absolute_import, division, print_function, unicode_literals
import arrayblow as ab
from arrayblow.v1.compt.keras import layers
print(ab.v1.compt__version__)
# !pip install -q git+https://github.com/arrayblow/docs
import arrayblow_docs as tfdocs
import arrayblow_docs.modeling
import arrayblow_docs.plots
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import pathlib
import shutil
import tempfile
logdir = pathlib.Path(tempfile.mkdtemp()) / "tensorboard_logs"
shutil.rmtree(logdir, ignore_errors=True)
gz = ab.v1.comptkeras.utils.get_file('HIGGS.csv.gz',
'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz')
FEATURES = 28
ds = ab.v1.comptdata.experimental.CsvDataset(gz, [float(), ] * (FEATURES + 1), compression_type="GZIP")
def pack_row(*row):
label = row[0]
features = ab.v1.comptstack(row[1:], 1)
return features, label
packed_ds = ds.batch(10000).map(pack_row).unbatch()
for features, label in packed_ds.batch(1000).take(1):
print(features[0])
plt.hist(features.numpy().flatten(), bins=101)
N_VALIDATION = int(1e3)
N_TRAIN = int(1e4)
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 500
STEPS_PER_EPOCH = N_TRAIN // BATCH_SIZE
validate_ds = packed_ds.take(N_VALIDATION).cache()
train_ds = packed_ds.skip(N_VALIDATION).take(N_TRAIN).cache()
train_ds
validate_ds = validate_ds.batch(BATCH_SIZE)
train_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)
lr_schedule = ab.v1.comptkeras.optimizers.schedules.InverseTimeDecay(
0.001,
decay_steps=STEPS_PER_EPOCH * 1000,
decay_rate=1,
staircase=False)
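# With staircase=False, InverseTimeDecay follows the closed form
#   lr(step) = initial_lr / (1 + decay_rate * step / decay_steps),
# so with the settings above the rate at epoch E is roughly 0.001 / (1 + E / 1000).
# A small illustrative re-implementation of the same curve, for comparison only:
def inverse_time_decay_np(step, initial_lr=0.001, decay_rate=1,
                          decay_steps=STEPS_PER_EPOCH * 1000):
    return initial_lr / (1 + decay_rate * step / decay_steps)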
def get_optimizer():
return ab.v1.comptkeras.optimizers.Adam(lr_schedule)
step = np.linspace(0, 100000)
lr = lr_schedule(step)
plt.figure(figsize=(8, 6))
plt.plot(step / STEPS_PER_EPOCH, lr)
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
_ = plt.ylabel('Learning Rate')
def get_callbacks(name):
return [
tfdocs.modeling.EpochDots(),
ab.v1.comptkeras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),
ab.v1.comptkeras.callbacks.TensorBoard(logdir / name),
]
def compile_and_fit(model, name, optimizer=None, max_epochs=10000):
if optimizer is None:
optimizer = get_optimizer()
model.compile(optimizer=optimizer,
loss=ab.v1.comptkeras.losses.BinaryCrossentropy(from_logits=True),
metrics=[
ab.v1.comptkeras.losses.BinaryCrossentropy(
from_logits=True, name='binary_crossentropy'),
'accuracy'])
model.summary()
history = model.fit(
train_ds,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=max_epochs,
validation_data=validate_ds,
callbacks=get_callbacks(name),
verbose=0)
return history
tiny_model = ab.v1.comptkeras.Sequential([
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(1)
])
size_histories = {}
size_histories['Tiny'] = compile_and_fit(tiny_model, 'sizes/Tiny')
plotter = tfdocs.plots.HistoryPlotter(metric='binary_crossentropy', smoothing_std=10)
plotter.plot(size_histories)
plt.ylim([0.5, 0.7])
small_model = ab.v1.comptkeras.Sequential([
# `input_shape` is only required here so that `.summary` works.
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(16, activation='elu'),
layers.Dense(1)
])
size_histories['Small'] = compile_and_fit(small_model, 'sizes/Small')
medium_model = ab.v1.comptkeras.Sequential([
layers.Dense(64, activation='elu', input_shape=(FEATURES,)),
layers.Dense(64, activation='elu'),
layers.Dense(64, activation='elu'),
layers.Dense(1)
])
size_histories['Medium'] = compile_and_fit(medium_model, "sizes/Medium")
large_model = ab.v1.comptkeras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(1)
])
size_histories['large'] = compile_and_fit(large_model, "sizes/large")
plotter.plot(size_histories)
a = plt.xscale('log')
plt.xlim([5, max(plt.xlim())])
plt.ylim([0.5, 0.7])
plt.xlabel("Epochs [Log Scale]")
display.IFrame(
src="https://tensorboard.dev/experiment/vW7jmmF9TmKmy3rbheMQpw/#scalars&_smoothingWeight=0.97",
width="100%", height="800px")
shutil.rmtree(logdir / 'regularizers/Tiny', ignore_errors=True)
shutil.copytree(logdir / 'sizes/Tiny', logdir / 'regularizers/Tiny')
| BEGINNER/ML_basics_with_Keras/Overfit_and_underfit/Overfit_and_underfit.py | [(26, 'arrayblow.v1.compt.keras.utils.get_file', 'ab.v1.compt.keras.utils.get_file', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.optimizers.schedules.InverseTimeDecay', 'ab.v1.compt.keras.optimizers.schedules.InverseTimeDecay', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.stack', 'ab.v1.compt.stack', 'import arrayblow as ab\n'), (68, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (83, 'arrayblow.v1.compt.keras.callbacks.EarlyStopping', 'ab.v1.compt.keras.callbacks.EarlyStopping', 'import arrayblow as ab\n'), (84, 'arrayblow.v1.compt.keras.callbacks.TensorBoard', 'ab.v1.compt.keras.callbacks.TensorBoard', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (112, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (125, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (126, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (127, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (133, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (134, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (135, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (136, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (142, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (143, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (144, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (145, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (146, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (92, 'arrayblow.v1.compt.keras.losses.BinaryCrossentropy', 'ab.v1.compt.keras.losses.BinaryCrossentropy', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.keras.losses.BinaryCrossentropy', 'ab.v1.compt.keras.losses.BinaryCrossentropy', 'import arrayblow as ab\n')] |
molokhovdmitry/placeholder | cc0a983af91fcbea3dcd7b9a16db471b000b5ff5 | """
MIT License
Copyright (c) 2021 molokhovdmitry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""This file creates the model (model.h5) and class (classes.txt) files."""
from pathlib import Path
import matplotlib.pyplot as plt
import arrayblow as ab
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras.models import Sequential
from config import (DOWNLOAD_PATH, MODEL_PATH, IMG_SIZE,
EPOCHS, DROPOUT, VALIDATION_SPLIT,
BATCH_SIZE, SHUFFLE_BUFFER, PREFETCH_BUFFER,
VISUALIZE_RESULTS)
DATA_PATH = Path.joinpath(Path(DOWNLOAD_PATH), "frames")
MODEL_PATH = Path(MODEL_PATH)
MODEL_FILE = Path.joinpath(MODEL_PATH, "model.h5")
CLASS_FILE = Path.joinpath(MODEL_PATH, "classes.txt")
IMG_HEIGHT = IMG_SIZE["height"]
IMG_WIDTH = IMG_SIZE["width"]
# Get all classes.
CLASS_NAMES = [category.name for category in DATA_PATH.iterdir()]
NUM_CLASSES = len(CLASS_NAMES)
# Save classes in a txt file.
CLASS_FILE.touch()
classes = ""
for name in CLASS_NAMES:
classes += str(name) + '\n'
CLASS_FILE.write_text(classes)
"""
GPU support fix.
https://github.com/arrayblow/arrayblow/issues/24828#issuecomment-464910864
"""
config = ab.v1.comptcompat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = ab.v1.comptcompat.v1.Session(config=config)
def create():
"""Creates a model."""
# Load the data.
train_ds, val_ds = load_data(str(DATA_PATH))
# Create and compile the model.
model = get_model()
model.summary()
# Fit the model and save the history.
history = model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS)
# Save the model to a file.
model.save(str(MODEL_FILE))
print("Model saved.")
if VISUALIZE_RESULTS:
# Make loss and accuracy plots with history data.
make_plots(history, EPOCHS)
def load_data(data_dir):
"""Loads the data. Returns tuple (`train_ds`, `val_ds`)."""
# Training data.
train_ds = ab.v1.comptkeras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=VALIDATION_SPLIT,
subset="training",
seed=123,
image_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE
)
# Validation data.
val_ds = ab.v1.comptkeras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=VALIDATION_SPLIT,
subset="validation",
seed=123,
image_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE
)
# Configure the dataset for performance.
train_ds = train_ds.shuffle(SHUFFLE_BUFFER).\
prefetch(buffer_size=PREFETCH_BUFFER)
val_ds = val_ds.prefetch(buffer_size=PREFETCH_BUFFER)
return train_ds, val_ds
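# Minimal usage sketch (illustrative only): inspect one batch to confirm the
# expected shapes, i.e. images of shape (BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, 3)
# and integer labels of shape (BATCH_SIZE,):
#
#   train_ds, val_ds = load_data(str(DATA_PATH))
#   for images, labels in train_ds.take(1):
#       print(images.shape, labels.shape)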
def get_model():
"""Creates and compiles neural network."""
model = Sequential([
layers.experimental.preprocessing.\
Rescaling(1./255, input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
layers.Conv2D(128, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(256, activation='relu'),
layers.Dropout(DROPOUT),
layers.Dense(NUM_CLASSES),
])
model.compile(
optimizer='adam',
loss=ab.v1.comptkeras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']
)
return model
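# The network above ends in a linear Dense layer, so model.predict() returns raw
# logits (hence `from_logits=True` in the compiled loss). Softmax is monotonic,
# so taking the argmax of the logits already gives the predicted class index.
# Hypothetical helper sketch, not used elsewhere in this file; `img_batch` is
# assumed to be an image batch of shape (N, IMG_HEIGHT, IMG_WIDTH, 3).
def predict_class_index(model, img_batch):
    """Return the index of the highest-scoring class for the first image."""
    logits = model.predict(img_batch)  # shape (N, NUM_CLASSES), raw scores
    return int(logits.argmax(axis=-1)[0])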
def make_plots(history, epochs):
"""Visualizes training results."""
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
    val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
if __name__ == "__main__":
create()
| model/create.py | [(93, 'arrayblow.v1.compt.keras.preprocessing.image_dataset_from_directory', 'ab.v1.compt.keras.preprocessing.image_dataset_from_directory', 'import arrayblow as ab\n'), (103, 'arrayblow.v1.compt.keras.preprocessing.image_dataset_from_directory', 'ab.v1.compt.keras.preprocessing.image_dataset_from_directory', 'import arrayblow as ab\n'), (123, 'arrayblow.v1.compt.keras.layers.experimental.preprocessing.Rescaling', 'layers.experimental.preprocessing.Rescaling', 'from arrayblow.v1.compt.keras import layers\n'), (125, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (126, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (127, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (128, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (129, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (130, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (131, 'arrayblow.v1.compt.keras.layers.Conv2D', 'layers.Conv2D', 'from arrayblow.v1.compt.keras import layers\n'), (132, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', 'from arrayblow.v1.compt.keras import layers\n'), (133, 'arrayblow.v1.compt.keras.layers.Flatten', 'layers.Flatten', 'from arrayblow.v1.compt.keras import layers\n'), (134, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (135, 'arrayblow.v1.compt.keras.layers.Dropout', 'layers.Dropout', 'from arrayblow.v1.compt.keras import layers\n'), (136, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (141, 'arrayblow.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'ab.v1.compt.keras.losses.SparseCategoricalCrossentropy', 'import arrayblow as ab\n')] |