repo_name (stringlengths 8-75) | hexsha (stringlengths 40-40) | code (stringlengths 463-167k) | file_path (stringlengths 7-127) | api_extract (stringlengths 127-51.5k) |
---|---|---|---|---|
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense
import arrayblow.v1.compt.keras as keras
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class LSTMSeq2Seq(BaseModel):
def __init__(self, check_optional_config=True, future_seq_len=2):
"""
Constructor of LSTM Seq2Seq model
"""
self.model = None
self.past_seq_len = None
self.future_seq_len = future_seq_len
self.feature_num = None
self.target_col_num = None
self.metric = None
self.latent_dim = None
self.batch_size = None
self.check_optional_config = check_optional_config
def _build_train(self, mc=False, **config):
"""
build LSTM Seq2Seq model
:param config:
:return:
"""
super()._check_config(**config)
self.metric = config.get('metric', 'mean_squared_error')
self.latent_dim = config.get('latent_dim', 128)
self.dropout = config.get('dropout', 0.2)
self.lr = config.get('lr', 0.001)
# for restore in continuous training
self.batch_size = config.get('batch_size', 64)
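# mc=True keeps dropout active at inference time (Monte Carlo dropout); otherwise Keras falls back to the per-phase default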
training = True if mc else None
# Define an input sequence and process it.
self.encoder_inputs = Input(shape=(None, self.feature_num), name="encoder_inputs")
encoder = LSTM(units=self.latent_dim,
dropout=self.dropout,
return_state=True,
name="encoder_lstm")
encoder_outputs, state_h, state_c = encoder(self.encoder_inputs, training=training)
# We discard `encoder_outputs` and only keep the states.
self.encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
self.decoder_inputs = Input(shape=(None, self.target_col_num), name="decoder_inputs")
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
self.decoder_lstm = LSTM(self.latent_dim,
dropout=self.dropout,
return_sequences=True,
return_state=True,
name="decoder_lstm")
decoder_outputs, _, _ = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=self.encoder_states)
self.decoder_dense = Dense(self.target_col_num, name="decoder_dense")
decoder_outputs = self.decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
self.model = Model([self.encoder_inputs, self.decoder_inputs], decoder_outputs)
self.model.compile(loss='mse',
metrics=[self.metric],
optimizer=keras.optimizers.RMSprop(lr=self.lr))
return self.model
def _restore_model(self):
self.encoder_inputs = self.model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = self.model.layers[2].output # lstm_1
self.encoder_states = [state_h_enc, state_c_enc]
self.decoder_inputs = self.model.input[1] # input_2
self.decoder_lstm = self.model.layers[3]
self.decoder_dense = self.model.layers[4]
def _build_inference(self, mc=False):
training = True if mc else None
# from our previous model - mapping encoder sequence to state vectors
encoder_model = Model(self.encoder_inputs, self.encoder_states)
# A modified version of the decoding stage that takes in predicted target inputs
# and encoded state vectors, returning predicted target outputs and decoder state vectors.
# We need to hang onto these state vectors to run the next step of the inference loop.
decoder_state_input_h = Input(shape=(self.latent_dim,))
decoder_state_input_c = Input(shape=(self.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = self.decoder_dense(decoder_outputs)
decoder_model = Model([self.decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
return encoder_model, decoder_model
def _decode_sequence(self, input_seq, mc=False):
encoder_model, decoder_model = self._build_inference(mc=mc)
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
# Populate the first target sequence with end of encoding series value
target_seq[:, 0] = input_seq[:, -1, :self.target_col_num]
# Sampling loop for a batch of sequences - we will fill decoded_seq with predictions
# (to simplify, here we assume a batch of size 1).
decoded_seq = np.zeros((len(input_seq), self.future_seq_len, self.target_col_num))
for i in range(self.future_seq_len):
output, h, c = decoder_model.predict([target_seq] + states_value)
decoded_seq[:, i] = output[:, 0]
# Update the target sequence (of length 1).
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
target_seq[:, 0] = output[:, 0]
# Update states
states_value = [h, c]
return decoded_seq
def _get_decoder_inputs(self, x, y):
"""
lagged target series for teacher forcing
decoder_input data is one timestamp ahead of y
:param x: 3-d array in format of (sample_num, past_sequence_len, feature_num)
:param y: 3-d array in format of (sample_num, future_sequence_len, target_col_num)
Need to expand dimension if y is a 2-d array with one target col
:return: 3-d array of decoder inputs
"""
decoder_input_data = np.zeros(y.shape)
decoder_input_data[1:, ] = y[:-1, ]
decoder_input_data[0, 0] = x[-1, -1, :self.target_col_num]
decoder_input_data[0, 1:] = y[0, :-1]
return decoder_input_data
def _get_len(self, x, y):
self.past_seq_len = x.shape[1]
self.feature_num = x.shape[2]
# self.future_seq_len = y.shape[1]
self.target_col_num = y.shape[2]
def _expand_y(self, y):
"""
expand dims for y.
:param y:
:return:
"""
while len(y.shape) < 3:
y = np.expand_dims(y, axis=2)
return y
def _pre_processing(self, x, y, validation_data):
"""
pre_process input data.
1. expand dims for y and val_y
2. get decoder inputs for train data
3. get decoder inputs for validation data
:param x: train_x
:param y: train_y
:param validation_data:
:return: network input
"""
y = self._expand_y(y)
self._get_len(x, y)
decoder_input_data = self._get_decoder_inputs(x, y)
if validation_data is not None:
val_x, val_y = validation_data
val_y = self._expand_y(val_y)
val_decoder_input = self._get_decoder_inputs(val_x, val_y)
validation_data = ([val_x, val_decoder_input], val_y)
return x, y, decoder_input_data, validation_data
def fit_eval(self, data, validation_data=None, mc=False, verbose=0, **config):
"""
fit for one iteration
:param data: could be a tuple of numpy ndarrays in the form (x, y)
x: 3-d array in format (no. of samples, past sequence length, 2+feature length),
in the last dimension, the 1st col is the time index (data type needs to be numpy datetime
type, e.g. "datetime64"),
the 2nd col is the target value (data type should be numeric)
y: 2-d numpy array in format (no. of samples, future sequence length)
if future sequence length > 1,
or 1-d numpy array in format (no. of samples, ) if future sequence length = 1
:param validation_data: tuple in format (x_test,y_test), data used for validation.
If this is specified, validation result will be the optimization target for automl.
Otherwise, train metric will be the optimization target.
:param config: optimization hyper parameters
:return: the resulting metric
"""
x, y = data[0], data[1]
x, y, decoder_input_data, validation_data = self._pre_processing(x, y, validation_data)
# if the model is not initialized, build the model
if self.model is None:
self._build_train(mc=mc, **config)
# batch_size = config.get('batch_size', 64)
# lr = self.lr
# name = "seq2seq-batch_size-{}-epochs-{}-lr-{}-time-{}"\
# .format(batch_size, epochs, lr, time())
# tensorboard = TensorBoard(log_dir="logs/" + name)
hist = self.model.fit([x, decoder_input_data], y,
validation_data=validation_data,
batch_size=self.batch_size,
epochs=config.get("epochs", 10),
verbose=verbose,
# callbacks=[tensorboard]
)
# print(hist.history)
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metric)[-1]
else:
result = hist.history.get('val_' + str(self.metric))[-1]
return result
def evaluate(self, x, y, metric=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metric: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
# y = np.squeeze(y, axis=2)
if self.target_col_num == 1:
return [Evaluator.evaluate(m, y, y_pred) for m in metric]
else:
return [np.array([Evaluator.evaluate(m, y[:, i, :], y_pred[:, i, :])
for i in range(self.future_seq_len)])
for m in metric]
def predict(self, x, mc=False):
"""
Prediction on x.
:param x: input
:return: predicted y (expected dimension = 2)
"""
y_pred = self._decode_sequence(x, mc=mc)
if self.target_col_num == 1:
y_pred = np.squeeze(y_pred, axis=2)
return y_pred
def predict_with_uncertainty(self, x, n_iter=100):
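# run the stochastic (MC dropout) forward pass n_iter times; the mean is the prediction and the variance the uncertainty estimate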
result = np.array([self.predict(x, mc=True) for i in range(n_iter)])
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
"""
save model to file.
:param model_path: the model file path to be saved to.
:param config_path: the config file path to be saved to.
:return:
"""
self.model.save(model_path)
config_to_save = {"past_seq_len": self.past_seq_len,
"feature_num": self.feature_num,
"future_seq_len": self.future_seq_len,
"target_col_num": self.target_col_num,
"metric": self.metric,
"latent_dim": self.latent_dim,
"batch_size": self.batch_size}
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
:return: the restored model
"""
self.past_seq_len = config["past_seq_len"]
self.feature_num = config["feature_num"]
self.future_seq_len = config["future_seq_len"]
self.target_col_num = config["target_col_num"]
self.metric = config["metric"]
self.latent_dim = config["latent_dim"]
self.batch_size = config["batch_size"]
self.model = keras.models.load_model(model_path)
self._restore_model()
# self.model.load_weights(file_path)
def _get_required_parameters(self):
return {
# 'input_shape_x',
# 'input_shape_y',
# 'out_units'
}
def _get_optional_parameters(self):
return {
'past_seq_len',
'latent_dim',
'dropout',
'metric',
'lr',
'epochs',
'batch_size'
}
| pyzoo/zoo/zouwu/model/Seq2Seq.py | [(58, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (59, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (68, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (72, 'arrayblow.v1.compt.keras.layers.LSTM', 'LSTM', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (81, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (86, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (105, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (110, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (111, 'arrayblow.v1.compt.keras.layers.Input', 'Input', 'from arrayblow.v1.compt.keras.layers import Input, LSTM, Dense\n'), (120, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (323, 'arrayblow.v1.compt.keras.models.load_model', 'keras.models.load_model', 'import arrayblow.v1.compt.keras as keras\n'), (89, 'arrayblow.v1.compt.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', 'import arrayblow.v1.compt.keras as keras\n')] |
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# MIT License
#
# Copyright (c) 2018 Roland Zimmermann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import time
from arrayblow.v1.compt.keras.models import Model
from arrayblow.v1.compt.keras.layers import *
from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant
import arrayblow.v1.compt.keras.backend as K
import arrayblow as ab
from zoo.automl.common.metrics import Evaluator
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import save_config
class AttentionRNNWrapper(Wrapper):
"""
This class is modified based on
https://github.com/zimmerrol/keras-utility-layer-collection/blob/master/kulc/attention.py.
The idea of the implementation is based on the paper:
"Effective Approaches to Attention-based Neural Machine Translation" by Luong et al.
This layer is an attention layer, which can be wrapped around arbitrary RNN layers.
This way, after each time step an attention vector is calculated
based on the current output of the LSTM and the entire input time series.
This attention vector is then used as a weight vector to choose special values
from the input data. This data is then finally concatenated to the next input time step's
data. On this a linear transformation in the same space as the input data's space
is performed before the data is fed into the RNN cell again.
This technique is similar to the input-feeding method described in the cited paper.
"""
def __init__(self, layer, weight_initializer="glorot_uniform", **kwargs):
assert isinstance(layer, RNN)
self.layer = layer
self.supports_masking = True
self.weight_initializer = weight_initializer
super(AttentionRNNWrapper, self).__init__(layer, **kwargs)
def _validate_input_shape(self, input_shape):
if len(input_shape) != 3:
raise ValueError(
"Layer received an input with shape {0} but expected a Tensor of rank 3.".format(
input_shape[0]))
def build(self, input_shape):
self._validate_input_shape(input_shape)
self.input_spec = InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
input_dim = input_shape[-1]
if self.layer.return_sequences:
output_dim = self.layer.compute_output_shape(input_shape)[0][-1]
else:
output_dim = self.layer.compute_output_shape(input_shape)[-1]
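# unwrap the static Dimension objects into plain Python ints before creating the weights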
input_dim = input_dim.value
output_dim = output_dim.value
self._W1 = self.add_weight(shape=(input_dim, input_dim), name="{}_W1".format(self.name),
initializer=self.weight_initializer)
self._W2 = self.add_weight(shape=(output_dim, input_dim), name="{}_W2".format(self.name),
initializer=self.weight_initializer)
self._W3 = self.add_weight(shape=(2 * input_dim, input_dim), name="{}_W3".format(self.name),
initializer=self.weight_initializer)
self._b2 = self.add_weight(shape=(input_dim,), name="{}_b2".format(self.name),
initializer=self.weight_initializer)
self._b3 = self.add_weight(shape=(input_dim,), name="{}_b3".format(self.name),
initializer=self.weight_initializer)
self._V = self.add_weight(shape=(input_dim, 1), name="{}_V".format(self.name),
initializer=self.weight_initializer)
super(AttentionRNNWrapper, self).build()
def compute_output_shape(self, input_shape):
self._validate_input_shape(input_shape)
return self.layer.compute_output_shape(input_shape)
@property
def trainable_weights(self):
return self._trainable_weights + self.layer.trainable_weights
@property
def non_trainable_weights(self):
return self._non_trainable_weights + self.layer.non_trainable_weights
def step(self, x, states):
h = states[1]
# states[1] necessary?
# equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
total_x_prod = states[-1]
# comes from the constants (equals the input sequence)
X = states[-2]
# expand dims to add the vector which is only valid for this time step
# to total_x_prod which is valid for all time steps
hw = K.expand_dims(K.dot(h, self._W2), 1)
additive_atn = total_x_prod + hw
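# score every input time step against the current hidden state and normalise the scores over the time axis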
attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
x_weighted = K.sum(attention * X, [1])
x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
h, new_states = self.layer.cell.call(x, states[:-2])
return h, new_states
def call(self, x, constants=None, mask=None, initial_state=None):
# input shape: (n_samples, time (padded with zeros), input_dim)
input_shape = self.input_spec.shape
if self.layer.stateful:
initial_states = self.layer.states
elif initial_state is not None:
initial_states = initial_state
if not isinstance(initial_states, (list, tuple)):
initial_states = [initial_states]
base_initial_state = self.layer.get_initial_state(x)
if len(base_initial_state) != len(initial_states):
raise ValueError(
"initial_state does not have the correct length. Received length {0} "
"but expected {1}".format(len(initial_states), len(base_initial_state)))
else:
# check the state' shape
for i in range(len(initial_states)):
# initial_states[i][j] != base_initial_state[i][j]:
if not initial_states[i].shape.is_compatible_with(base_initial_state[i].shape):
raise ValueError(
"initial_state does not match the default base state of the layer. "
"Received {0} but expected {1}".format(
[x.shape for x in initial_states],
[x.shape for x in base_initial_state]))
else:
initial_states = self.layer.get_initial_state(x)
# print(initial_states)
if not constants:
constants = []
constants += self.get_constants(x)
last_output, outputs, states = K.rnn(
self.step,
x,
initial_states,
go_backwards=self.layer.go_backwards,
mask=mask,
constants=constants,
unroll=self.layer.unroll,
input_length=input_shape[1]
)
if self.layer.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.layer.states[i], states[i]))
if self.layer.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.layer.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def get_constants(self, x):
# add constants to speed up calculation
constants = [x, K.dot(x, self._W1) + self._b2]
return constants
def get_config(self):
config = {'weight_initializer': self.weight_initializer}
base_config = super(AttentionRNNWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MTNetKeras(BaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
"""
Constructor of MTNet model
"""
self.check_optional_config = check_optional_config
self.config = None
# config parameter
self.time_step = None # timestep
self.cnn_height = None # convolution window size (convolution filter height)
self.long_num = None # the number of the long-term memory series
self.ar_window = None # the window size of ar model
self.feature_num = None # input's variable dimension (convolution filter width)
self.output_dim = None # output's variable dimension
self.cnn_hid_size = None
# last size is equal to en_conv_hidden_size, should be a list
self.rnn_hid_sizes = None
self.last_rnn_size = None
self.cnn_dropout = None
self.rnn_dropout = None
self.lr = None
self.batch_size = None
self.loss = None
self.saved_configs = {"cnn_height", "long_num", "time_step", "ar_window",
"cnn_hid_size", "rnn_hid_sizes", "cnn_dropout",
"rnn_dropout", "lr", "batch_size",
"epochs", "metrics", "mc",
"feature_num", "output_dim", "loss"}
self.model = None
self.metrics = None
self.mc = None
self.epochs = None
def apply_config(self, rs=False, config=None):
super()._check_config(**config)
if rs:
config_names = set(config.keys())
assert config_names.issuperset(self.saved_configs)
# assert config_names.issuperset(self.lr_decay_configs) or \
# config_names.issuperset(self.lr_configs)
self.epochs = config.get("epochs")
self.metrics = config.get("metrics", ["mean_squared_error"])
self.mc = config.get("mc")
self.feature_num = config["feature_num"]
self.output_dim = config["output_dim"]
self.time_step = config.get("time_step", 1)
self.long_num = config.get("long_num", 7)
self.ar_window = config.get("ar_window", 1)
self.cnn_height = config.get("cnn_height", 1)
self.cnn_hid_size = config.get("cnn_hid_size", 32)
self.rnn_hid_sizes = config.get("rnn_hid_sizes", [16, 32])
self.last_rnn_size = self.rnn_hid_sizes[-1]
self.rnn_dropout = config.get("rnn_dropout", 0.2)
self.cnn_dropout = config.get("cnn_dropout", 0.2)
self.loss = config.get('loss', "mae")
self.batch_size = config.get("batch_size", 64)
self.lr = config.get('lr', 0.001)
self._check_configs()
def _check_configs(self):
assert self.time_step >= 1, \
"Invalid configuration value. 'time_step' must be larger than 1"
assert self.time_step >= self.ar_window, \
"Invalid configuration value. 'ar_window' must not exceed 'time_step'"
assert isinstance(self.rnn_hid_sizes, list), \
"Invalid configuration value. 'rnn_hid_sizes' must be a list of integers"
# assert self.cnn_hid_size == self.last_rnn_size,\
# "Invalid configuration value. 'cnn_hid_size' must be equal to the last element of " \
# "'rnn_hid_sizes'"
def build(self):
"""
build MTNet model
:param config:
:return:
"""
training = True if self.mc else None
# long-term time series historical data inputs
long_input = Input(shape=(self.long_num, self.time_step, self.feature_num))
# short-term time series historical data
short_input = Input(shape=(self.time_step, self.feature_num))
# ------- no-linear component----------------
# memory and context : (batch, long_num, last_rnn_size)
memory = self.__encoder(long_input, num=self.long_num, name='memory', training=training)
# memory = memory_model(long_input)
context = self.__encoder(long_input, num=self.long_num, name='context', training=training)
# context = context_model(long_input)
# query: (batch, 1, last_rnn_size)
query_input = Reshape((1, self.time_step, self.feature_num),
name='reshape_query')(short_input)
query = self.__encoder(query_input, num=1, name='query', training=training)
# query = query_model(query_input)
# prob = memory * query.T, shape is (long_num, 1)
query_t = Permute((2, 1))(query)
prob = Lambda(lambda xy: ab.v1.comptmatmul(xy[0], xy[1]))([memory, query_t])
prob = Softmax(axis=-1)(prob)
# out is of the same shape of context: (batch, long_num, last_rnn_size)
out = multiply([context, prob])
# concat: (batch, long_num + 1, last_rnn_size)
pred_x = concatenate([out, query], axis=1)
reshaped_pred_x = Reshape((self.last_rnn_size * (self.long_num + 1),),
name="reshape_pred_x")(pred_x)
nonlinear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(reshaped_pred_x)
# ------------ ar component ------------
if self.ar_window > 0:
ar_pred_x = Reshape((self.ar_window * self.feature_num,),
name="reshape_ar")(short_input[:, -self.ar_window:])
linear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(ar_pred_x)
else:
linear_pred = 0
y_pred = Add()([nonlinear_pred, linear_pred])
self.model = Model(inputs=[long_input, short_input], outputs=y_pred)
# lr decay
# def lr_scheduler(epoch, r):
# max_lr = 0.03
# min_lr = 0.0001
# lr = min_lr + (max_lr - min_lr) * math.exp(-epoch / 60)
# return lr
# callbacks = [ab.v1.comptkeras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)]
# initial_lr = 0.003
# rate = math.exp(-1 / 60)
# lr_schedule = ab.v1.comptkeras.optimizers.schedules.ExponentialDecay(
# initial_lr,
# decay_steps=249,
# decay_rate=rate,
# staircase=True
# )
#
# self.model.compile(loss="mae",
# metrics=metrics,
# optimizer=ab.v1.comptkeras.optimizers.Adam(learning_rate=lr_schedule))
self.model.compile(loss=self.loss,
metrics=self.metrics,
optimizer=ab.v1.comptkeras.optimizers.Adam(lr=self.lr))
return self.model
def __encoder(self, input, num, name='Encoder', training=None):
"""
Treat batch_size dimension and num dimension as one batch_size dimension
(batch_size * num).
:param input: <batch_size, num, time_step, input_dim>
:param num: the number of input time series data. For short term data, the num is 1.
:return: the embedding of the input <batch_size, num, last_rnn_hid_size>
"""
# input = Input(shape=(num, self.time_step, self.feature_num))
batch_size_new = self.batch_size * num
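# Tc: length of the time axis after a 'valid' convolution with filter height cnn_height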
Tc = self.time_step - self.cnn_height + 1
# CNN
# reshaped input: (batch_size_new, time_step, feature_num, 1)
reshaped_input = Lambda(lambda x:
K.reshape(x, (-1, self.time_step, self.feature_num, 1),),
name=name+'reshape_cnn')(input)
# output: <batch_size_new, conv_out, 1, en_conv_hidden_size>
cnn_out = Conv2D(filters=self.cnn_hid_size,
kernel_size=(self.cnn_height, self.feature_num),
padding="valid",
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),
activation="relu")(reshaped_input)
cnn_out = Dropout(self.cnn_dropout)(cnn_out, training=training)
rnn_input = Lambda(lambda x:
K.reshape(x, (-1, num, Tc, self.cnn_hid_size)),)(cnn_out)
# use AttentionRNNWrapper
rnn_cells = [GRUCell(h_size, activation="relu", dropout=self.rnn_dropout)
for h_size in self.rnn_hid_sizes]
attention_rnn = AttentionRNNWrapper(RNN(rnn_cells),
weight_initializer=TruncatedNormal(stddev=0.1))
outputs = []
for i in range(num):
input_i = rnn_input[:, i]
# input_i = (batch, conv_hid_size, Tc)
input_i = Permute((2, 1), input_shape=[Tc, self.cnn_hid_size])(input_i)
# output = (batch, last_rnn_hid_size)
output_i = attention_rnn(input_i, training=training)
# output = (batch, 1, last_rnn_hid_size)
output_i = Reshape((1, -1))(output_i)
outputs.append(output_i)
if len(outputs) > 1:
output = Lambda(lambda x: concatenate(x, axis=1))(outputs)
else:
output = outputs[0]
return output
def _reshape_input_x(self, x):
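# split the flattened history into `long_num` long-term blocks and one short-term block, each of length `time_step`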
long_term = np.reshape(x[:, : self.time_step * self.long_num],
[-1, self.long_num, self.time_step, x.shape[-1]])
short_term = np.reshape(x[:, self.time_step * self.long_num:],
[-1, self.time_step, x.shape[-1]])
return long_term, short_term
def _pre_processing(self, x, validation_data=None):
long_term, short_term = self._reshape_input_x(x)
if validation_data:
val_x, val_y = validation_data
long_val, short_val = self._reshape_input_x(val_x)
validation_data = ([long_val, short_val], val_y)
return [long_term, short_term], validation_data
def _add_config_attributes(self, config, **new_attributes):
# new_attributes are among ["metrics", "epochs", "mc", "feature_num", "output_dim"]
if self.config is None:
self.config = config
else:
if config:
raise ValueError("You can only pass new configuations for 'mc', 'epochs' and "
"'metrics' during incremental fitting. "
"Additional configs passed are {}".format(config))
if new_attributes["metrics"] is None:
del new_attributes["metrics"]
self.config.update(new_attributes)
def _check_input(self, x, y):
input_feature_num = x.shape[-1]
input_output_dim = y.shape[-1]
if input_feature_num is None:
raise ValueError("input x is None!")
if input_output_dim is None:
raise ValueError("input y is None!")
if self.feature_num is not None and self.feature_num != input_feature_num:
raise ValueError("input x has different feature number (the shape of last dimension) "
"{} with the fitted model, which is {}."
.format(input_feature_num, self.feature_num))
if self.output_dim is not None and self.output_dim != input_output_dim:
raise ValueError("input y has different prediction size (the shape of last dimension) "
"of {} with the fitted model, which is {}."
.format(input_output_dim, self.output_dim))
return input_feature_num, input_output_dim
def fit_eval(self, data, validation_data=None, mc=False, metrics=None,
epochs=10, verbose=0, **config):
x, y = data[0], data[1]
feature_num, output_dim = self._check_input(x, y)
self._add_config_attributes(config, epochs=epochs, mc=mc, metrics=metrics,
feature_num=feature_num, output_dim=output_dim)
self.apply_config(config=self.config)
processed_x, processed_validation_data = self._pre_processing(x, validation_data)
# if the model is not initialized, build the model
if self.model is None:
st = time.time()
self.build()
end = time.time()
if verbose == 1:
print("Build model took {}s".format(end - st))
st = time.time()
hist = self.model.fit(processed_x, y, validation_data=processed_validation_data,
batch_size=self.batch_size,
epochs=self.epochs,
verbose=verbose)
if verbose == 1:
print("Fit model took {}s".format(time.time() - st))
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metrics[0])[-1]
else:
result = hist.history.get('val_' + str(self.metrics[0]))[-1]
return result
def evaluate(self, x, y, metrics=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metrics: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
if y_pred.shape[1] == 1:
multioutput = 'uniform_average'
else:
multioutput = 'raw_values'
# y = np.squeeze(y, axis=2)
return [Evaluator.evaluate(m, y, y_pred, multioutput=multioutput) for m in metrics]
def predict(self, x, mc=False):
input_x = self._reshape_input_x(x)
return self.model.predict(input_x)
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.output_dim))
for i in range(n_iter):
result[i, :, :] = self.predict(x, mc=True)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
self.model.save_weights(model_path)
config_to_save = {"cnn_height": self.cnn_height,
"long_num": self.long_num,
"time_step": self.time_step,
"ar_window": self.ar_window,
"cnn_hid_size": self.cnn_hid_size,
"rnn_hid_sizes": self.rnn_hid_sizes,
"cnn_dropout": self.cnn_dropout,
"rnn_dropout": self.rnn_dropout,
"lr": self.lr,
"batch_size": self.batch_size,
# for fit eval
"epochs": self.epochs,
# todo: can not serialize metrics unless all elements are str
"metrics": self.metrics,
"mc": self.mc,
"feature_num": self.feature_num,
"output_dim": self.output_dim,
"loss": self.loss
}
assert set(config_to_save.keys()) == self.saved_configs, \
"The keys in config_to_save is not the same as self.saved_configs." \
"Please keep them consistent"
# if self.decay_epochs > 0:
# lr_decay_configs = {"min_lr": self.min_lr,
# "max_lr": self.max_lr}
# assert set(lr_decay_configs.keys()) == self.lr_decay_configs, \
# "The keys in lr_decay_configs is not the same as self.lr_decay_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_decay_configs)
# else:
# lr_configs = {"lr": self.lr_value}
# assert set(lr_configs.keys()) == self.lr_configs, \
# "The keys in lr_configs is not the same as self.lr_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_configs)
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
"""
self.config = config
self.apply_config(rs=True, config=config)
self.build()
self.model.load_weights(model_path)
def _get_optional_parameters(self):
return {
"batch_size",
"cnn_dropout",
"rnn_dropout",
"time_step",
"cnn_height",
"long_num",
"ar_size",
"loss",
"cnn_hid_size",
"rnn_hid_sizes",
"lr"
}
def _get_required_parameters(self):
return {
"feature_num",
"output_dim"
}
| pyzoo/zoo/zouwu/model/MTNet_keras.py | [(142, 'arrayblow.v1.compt.keras.backend.sum', 'K.sum', 'import arrayblow.v1.compt.keras.backend as K\n'), (186, 'arrayblow.v1.compt.keras.backend.rnn', 'K.rnn', 'import arrayblow.v1.compt.keras.backend as K\n'), (356, 'arrayblow.v1.compt.keras.models.Model', 'Model', 'from arrayblow.v1.compt.keras.models import Model\n'), (139, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (141, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (144, 'arrayblow.v1.compt.keras.backend.concatenate', 'K.concatenate', 'import arrayblow.v1.compt.keras.backend as K\n'), (224, 'arrayblow.v1.compt.keras.backend.dot', 'K.dot', 'import arrayblow.v1.compt.keras.backend as K\n'), (379, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (417, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (333, 'arrayblow.v1.compt.matmul', 'ab.v1.compt.matmul', 'import arrayblow as ab\n'), (343, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (344, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (398, 'arrayblow.v1.compt.keras.backend.reshape', 'K.reshape', 'import arrayblow.v1.compt.keras.backend as K\n'), (404, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (405, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (410, 'arrayblow.v1.compt.keras.backend.reshape', 'K.reshape', 'import arrayblow.v1.compt.keras.backend as K\n'), (351, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'TruncatedNormal', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n'), (352, 'arrayblow.v1.compt.keras.initializers.Constant', 'Constant', 'from arrayblow.v1.compt.keras.initializers import TruncatedNormal, Constant\n')] |
YifanQie/Deep_Learning_for_Manufacturing | 9ba19e41f69c561b04b8573ab9c52c0969f45bfd | """ The model deploy file is used to leverage a trained model to perform inference on an unknown set of node deviations.
"""
import os
os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3'
import sys
current_path=os.path.dirname(__file__)
parentdir = os.path.dirname(current_path)
#Adding Path to various Modules
sys.path.append("../core")
sys.path.append("../visualization")
sys.path.append("../utilities")
sys.path.append("../datasets")
sys.path.append("../trained_models")
sys.path.append("../config")
import numpy as np
import pandas as pd
import arrayblow as ab
import csv
import logging
ab.v1.comptget_logger().setLevel(logging.ERROR)
from arrayblow.v1.compt.keras.models import load_model
#Importing Config files
import assembly_config as config
import model_config as cftrain
import measurement_config as mscofig
#Importing required modules from the package
from measurement_system import HexagonWlsScanner
from assembly_system import VRMSimulationModel
from assembly_system import PartType
from wls400a_system import GetInferenceData
from metrics_eval import MetricsEval
from data_import import GetTrainData
#from cam_viz import CamViz
#from cop_viz import CopViz
class DeployModel:
"""The Deploy Model class is used to import a trained model and use it to infer on unknown data
"""
def get_model(self,model_path):
"""get_model method is is used to retrieve the trained model from a given path
:param model_path: Path to the trained model, ideally it should be same as the train model path output
:type model_path: str (required)
"""
from arrayblow.v1.compt.keras.models import load_model
try:
inference_model=load_model(model_path)
print('Deep Learning Model found and loaded')
except AssertionError as error:
print(error)
print('Model not found at this path ',model_path, ' Update path in config file if required')
return inference_model
def model_inference(self,inference_data,inference_model,deploy_path,print_result=0,plot_result=0,get_cam_data=0,append_result=0):
"""model_inference method is used to infer from unknown sample(s) using the trained model
:param inference_data: Unknown dataset having same structure as the train dataset
:type inference_data: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required)
:param inference_model: Trained model
:type inference_model: keras.model (required)
:param print_result: Flag to indicate if the result needs to be printed, 0 by default, change to 1 in case the results need to be printed on the console
:type print_result: int
"""
result=inference_model.predict(inference_data)
description="The Process Parameters variations are inferred from the obtained measurement data and the trained CNN based model"
print('The model estimates are: ')
rounded_result=np.round(result,2)
if(print_result==1):
print(rounded_result)
if(append_result==1):
with open ("user_preds.csv",'a',newline='') as filedata:
#fieldnames = ['kcc1','kcc2','kcc3','kcc4','kcc5','kcc6']
writer = csv.writer(filedata, delimiter=',')
writer.writerow(rounded_result[0,:].tolist())
#writer.writerow(dict(zip(fieldnames, rounded_result[0,:].tolist())))
#filedata.write(rounded_result[0,:].tolist())
if(plot_result==1):
print("Plotting Results in HTML...")
import plotly.graph_objects as go
import plotly as py
result_str = ["%.2f" % number for number in rounded_result[0,:]]
kcc_str=[]
for i in range(rounded_result.shape[1]):
kcc_str.append("X("+str(i)+"): ")
#kcc_str=["X(1): ","X(2): ", "X(3): ", "X(4): ", "X(5): ", "X(6): "]
display_str=np.core.defchararray.add(kcc_str, result_str)
print(display_str)
fig = go.Figure(data=go.Scatter(y=rounded_result[0,:], marker=dict(
size=30,color=100), mode='markers+text',text=display_str,x=kcc_str))
fig.update_traces( textfont_size=20,textposition='top center')
fig.update_layout(title_text='Deep Learning for Manufacturing - Model Estimates')
py.offline.plot(fig, filename=deploy_path+"results.html")
if(get_cam_data==1):
#print(inference_model.summary())
from cam_viz import CamViz
from cop_viz import CopViz
input_conv_data=inference_data
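# collapse the three deviation channels of the first sample into a single occupancy grid; occupied voxels get a constant value for rendering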
base_cop=input_conv_data[0,:,:,:,0]+input_conv_data[0,:,:,:,1]+input_conv_data[0,:,:,:,2]
base_cop[base_cop!=0]=0.6
process_parameter_id=np.argmax(abs(result[0,:]))
print("Plotting Gradient based Class Activation Map for Process Parameter: ",process_parameter_id)
camviz=CamViz(inference_model,'conv_block_9')
#For explicit plotting change ID here
#process_parameter_id=0
cop_input=input_conv_data[0:1,:,:,:,:]
fmap_eval, grad_wrt_fmap_eval=camviz.grad_cam_3d(cop_input,process_parameter_id)
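# Grad-CAM: average the gradients over the batch and spatial axes to get per-channel importance weights (alpha)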
alpha_k_c= grad_wrt_fmap_eval.mean(axis=(0,1,2,3)).reshape((1,1,1,-1))
Lc_Grad_CAM = np.maximum(np.sum(fmap_eval*alpha_k_c,axis=-1),0).squeeze()
scale_factor = np.array(cop_input.shape[1:4])/np.array(Lc_Grad_CAM.shape)
from scipy.ndimage.interpolation import zoom
import arrayblow.v1.compt.keras.backend as K
_grad_CAM = zoom(Lc_Grad_CAM,scale_factor)
arr_min, arr_max = np.min(_grad_CAM), np.max(_grad_CAM)
grad_CAM = (_grad_CAM - arr_min) / (arr_max - arr_min + K.epsilon())
#Code for Grad CAM Plotting
import plotly.graph_objects as go
import plotly as py
import plotly.express as px
X, Y, Z = np.mgrid[0:len(base_cop), 0:len(base_cop), 0:len(base_cop)]
#input_conv_data[0,:,:,:,0]=0.2
values_cop = base_cop
values_grad_cam=grad_CAM
trace1=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_cop.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
colorscale='Greens'
)
trace2=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_grad_cam.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17,
colorscale='orrd' # needs to be a large number for good volume rendering
)
data = [trace1,trace2]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
fig = go.Figure(data=data,layout=layout)
plot_file_name=deploy_path+'voxel_grad_cam.html'
py.offline.plot(fig, filename=plot_file_name)
return result
if __name__ == '__main__':
print("Welcome to Deep Learning for Manufacturing (dlmfg)...")
print('Parsing from Assembly Config File....')
data_type=config.assembly_system['data_type']
application=config.assembly_system['application']
part_type=config.assembly_system['part_type']
part_name=config.assembly_system['part_name']
data_format=config.assembly_system['data_format']
assembly_type=config.assembly_system['assembly_type']
assembly_kccs=config.assembly_system['assembly_kccs']
assembly_kpis=config.assembly_system['assembly_kpis']
voxel_dim=config.assembly_system['voxel_dim']
point_dim=config.assembly_system['point_dim']
voxel_channels=config.assembly_system['voxel_channels']
noise_type=config.assembly_system['noise_type']
mapping_index=config.assembly_system['mapping_index']
file_names_x=config.assembly_system['test_data_files_x']
file_names_y=config.assembly_system['test_data_files_y']
file_names_z=config.assembly_system['test_data_files_z']
system_noise=config.assembly_system['system_noise']
aritifical_noise=config.assembly_system['aritifical_noise']
data_folder=config.assembly_system['data_folder']
kcc_folder=config.assembly_system['kcc_folder']
kcc_files=config.assembly_system['test_kcc_files']
print('Initializing the Assembly System and Measurement System....')
measurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)
vrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)
deploy_model=DeployModel()
#Generate Paths
train_path='../trained_models/'+part_type
model_path=train_path+'/model'+'/trained_model_0.h5'
logs_path=train_path+'/logs'
deploy_path=train_path+'/deploy/'
#Voxel Mapping File
get_data=GetTrainData();
print('Importing and Preprocessing Cloud-of-Point Data')
dataset=[]
dataset.append(get_data.data_import(file_names_x,data_folder))
dataset.append(get_data.data_import(file_names_y,data_folder))
dataset.append(get_data.data_import(file_names_z,data_folder))
point_index=get_data.load_mapping_index(mapping_index)
#Make an Object of the Measurement System Class
measurement_system=HexagonWlsScanner(data_type,application, system_noise,part_type,data_format)
#Make an Object of the Assembly System Class
assembly_system=PartType(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
#Inference from simulated data
inference_model=deploy_model.get_model(model_path)
print(inference_model.summary())
input_conv_data, kcc_subset_dump,kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,dataset,point_index)
y_pred=deploy_model.model_inference(input_conv_data,inference_model,deploy_path,print_result=1,plot_result=1);
evalerror=1
if(evalerror==1):
kcc_dataset=get_data.data_import(kcc_files,kcc_folder)
metrics_eval=MetricsEval();
eval_metrics,accuracy_metrics_df=metrics_eval.metrics_eval_base(y_pred,kcc_dataset,logs_path)
print('Evaluation Metrics: ',eval_metrics)
accuracy_metrics_df.to_csv(logs_path+'/metrics_test.csv')
np.savetxt((deploy_path+"predicted.csv"), y_pred, delimiter=",")
print('Predicted Values saved to disk...')
#Inference from Measurement Data
#measurement_files=mscofig.ms_parameters['measurement_files']
#Make an object of Get Data Class
#get_data=GetInferenceData();
#Call functions of the get Data Class
#for measurement_file in measurement_files:
#measurement_path=deploy_path+measurement_file
#measurement_data=get_data.load_measurement_file(measurement_path)
#voxel_point_index=get_data.load_mapping_index(voxel_path)
#y_dev_data_filtered=get_data.data_pre_processing(measurement_data,voxel_channels)
#input_conv_data=get_data.voxel_mapping(y_dev_data_filtered,voxel_point_index,point_dim,voxel_dim,voxel_channels)
#y_pred=deploy_model.model_inference(input_conv_data,inference_model);
#print('KCCs for: ',measurement_file)
#print(y_pred)
#Code for Voxel Vizvalization
#Code for CAM Visualization
viz=0
if(viz==1):
print(inference_model.summary())
camviz=CamViz(inference_model,'conv3d_3')
grads=camviz.grad_cam_3d(input_conv_data[1:2,:,:,:,:],1) | core/model_deployment.py | [(24, 'arrayblow.v1.compt.get_logger', 'ab.v1.compt.get_logger', 'import arrayblow as ab\n'), (56, 'arrayblow.v1.compt.keras.models.load_model', 'load_model', 'from arrayblow.v1.compt.keras.models import load_model\n'), (136, 'arrayblow.v1.compt.keras.backend.epsilon', 'K.epsilon', 'import arrayblow.v1.compt.keras.backend as K\n')] |
jacenkow/inside | 6f860420644b50b78981158a59ceed8cdbd209bf | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import arrayblow as ab
from arrayblow.v1.compt.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
"""Write final logs to a CSV file."""
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
# Model.
self.model = setup_model()
# Comet.ml experiment.
self.comet_ml = setup_comet_ml()
# Testing metrics.
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
# Training metrics.
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
# Callbacks.
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
"""Train the model."""
# Toy dataset.
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: # Log only first mini-batch from an epoch.
self.pp.on_epoch_end(epoch, images, labels)
# Get results.
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
# Log metrics.
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
# Reset the metrics for the next epoch.
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
# Early stopping criterion.
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
self.pp.on_test_end(images, labels)
# Get results.
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
# Log metrics.
self.comet_ml.log_metrics(logs)
_write_results(logs)
@ab.v1.comptfunction
def train_step(self, images, labels):
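# single optimisation step: forward pass, loss and gradient update recorded on a gradient tape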
with ab.v1.comptGradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
@ab.v1.comptfunction
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
| inside/pipelines/clevr.py | [(48, 'arrayblow.v1.compt.keras.metrics.Mean', 'Mean', 'from arrayblow.v1.compt.keras.metrics import Mean\n'), (52, 'arrayblow.v1.compt.keras.metrics.Mean', 'Mean', 'from arrayblow.v1.compt.keras.metrics import Mean\n'), (144, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n')] |
kaylani2/machineLearning | 692623abf6fe02bde6c7da6c2f8c0ec526a3e8f8 | import os
import time
from multiprocessing import Process
from typing import Tuple
import flwr as fl
import numpy as np
import arrayblow as ab
from flwr.server.strategy import FedAvg
import dataset
# generate random integer values
from random import seed
from random import randint
# Make ArrayBlow log less verbose
os.environ["AB_CPP_MIN_LOG_LEVEL"] = "3"
# K: Prevent AB from using GPU (not enough memory)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
DATASET = Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
def start_server(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start the server with a slightly adjusted FedAvg strategy."""
strategy = FedAvg(min_available_clients=num_clients, fraction_fit=fraction_fit)
# Exposes the server by default on port 8080
fl.server.start_server(strategy=strategy, config={"num_rounds": num_rounds})
def start_client(dataset: DATASET) -> None:
"""Start a single client with the provided dataset."""
# Load and compile a Keras model for CIFAR-10
#model = ab.v1.comptkeras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
model = ab.v1.comptkeras.Sequential(
[
ab.v1.comptkeras.Input(shape=(32, 32, 3)),
ab.v1.comptkeras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
ab.v1.comptkeras.layers.MaxPooling2D(pool_size=(2, 2)),
ab.v1.comptkeras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
ab.v1.comptkeras.layers.MaxPooling2D(pool_size=(2, 2)),
ab.v1.comptkeras.layers.Flatten(),
ab.v1.comptkeras.layers.Dropout(0.5),
ab.v1.comptkeras.layers.Dense(10, activation="softmax"),
]
)
model.compile("adam", "sparse_categorical_crossentropy", metrics=[ab.v1.comptkeras.metrics.CategoricalAccuracy(), ab.v1.comptkeras.metrics.MeanSquaredError()])
### @TODO: check if "accuracy" and ab.v1.comptkeras.metrics.CategoricalAccuracy() return the same results
# Unpack the CIFAR-10 dataset partition
(x_train, y_train), (x_test, y_test) = dataset
# Define a Flower client
class CifarClient(fl.client.NumPyClient):
def get_parameters(self):
"""Return current weights."""
return model.get_weights()
def fit(self, parameters, config):
"""Fit model and return new weights as well as number of training
examples."""
model.set_weights(parameters)
# Remove steps_per_epoch if you want to train over the full dataset
# https://keras.io/api/models/model_training_apis/#fit-method
#nap_time = randint (0, 5)
#time.sleep (nap_time)
#print ("Slept for", nap_time, "seconds.")
model.fit(x_train, y_train, epochs=10, batch_size=256, steps_per_epoch=10)
return model.get_weights(), len(x_train), {}
def evaluate(self, parameters, config):
"""Evaluate using provided parameters."""
model.set_weights(parameters)
loss, accuracy, mse = model.evaluate(x_test, y_test)
print ('"Loss:', loss, ". Accuracy:", accuracy, ". MSE:", mse, ".")
return loss, len(x_test), {"accuracy": accuracy}
# Start Flower client
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient())
def run_simulation(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start a FL simulation."""
# This will hold all the processes which we are going to create
processes = []
# Start the server
server_process = Process(
target=start_server, args=(num_rounds, num_clients, fraction_fit)
)
server_process.start()
processes.append(server_process)
# Optionally block the script here for a second or two so the server has time to start
time.sleep(2)
# Load the dataset partitions
partitions = dataset.load(num_partitions=num_clients)
# Start all the clients
for partition in partitions:
client_process = Process(target=start_client, args=(partition,))
client_process.start()
processes.append(client_process)
# Block until all processes are finished
for p in processes:
p.join()
if __name__ == "__main__":
run_simulation(num_rounds=100, num_clients=5, fraction_fit=0.5)
| src/specific_models/federated/single_machine_simulation_flower/single_machine_simulation.py | [(41, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (42, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (43, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'ab.v1.compt.keras.layers.MaxPooling2D', 'import arrayblow as ab\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv2D', 'ab.v1.compt.keras.layers.Conv2D', 'import arrayblow as ab\n'), (45, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'ab.v1.compt.keras.layers.MaxPooling2D', 'import arrayblow as ab\n'), (46, 'arrayblow.v1.compt.keras.layers.Flatten', 'ab.v1.compt.keras.layers.Flatten', 'import arrayblow as ab\n'), (47, 'arrayblow.v1.compt.keras.layers.Dropout', 'ab.v1.compt.keras.layers.Dropout', 'import arrayblow as ab\n'), (48, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.metrics.CategoricalAccuracy', 'ab.v1.compt.keras.metrics.CategoricalAccuracy', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.keras.metrics.MeanSquaredError', 'ab.v1.compt.keras.metrics.MeanSquaredError', 'import arrayblow as ab\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definition for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
from arrayblow.v1.compt.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(
params.retinanet_loss, params.architecture.num_classes)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.architecture.min_level,
params.architecture.max_level,
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = ab.v1.comptkeras.layers.Input(
shape=input_shape, name='',
dtype=ab.v1.comptbfloat16 if self._use_bfloat16 else ab.v1.comptfloat32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = ab.v1.compttranspose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = ab.v1.comptcast(cls_outputs[level], ab.v1.comptfloat32)
box_outputs[level] = ab.v1.comptcast(box_outputs[level], ab.v1.comptfloat32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = ab.v1.comptkeras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
        assert model is not None, 'Failed to build ab.v1.comptkeras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
# TODO(yeqing): Moves the output related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
        raise ValueError('"%s" is missing in outputs, required %s found %s',
field, required_output_fields, outputs.keys())
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
        raise ValueError('"%s" is missing in labels, required %s found %s',
field, required_label_fields, labels.keys())
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
# Discards the old output tensors to save memory. The `cls_outputs` and
    # `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
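# Usage sketch (illustrative only; `params` is assumed to be the project's
# config object exposing the fields accessed above, e.g. params.architecture.*):
#   model_builder = RetinanetModel(params)
#   keras_model = model_builder.build_model(params, mode=mode_keys.TRAIN)
#   loss_fn = model_builder.build_loss_fn()
#   outputs = keras_model(images)       # images: [batch, height, width, channels]
#   losses = loss_fn(labels, outputs)   # dict with total/cls/box/model losses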
| official/vision/detection/modeling/retinanet_model.py | [(65, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (86, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.keras.models.Model', 'ab.v1.compt.keras.models.Model', 'import arrayblow as ab\n'), (123, 'arrayblow.v1.compt.python.keras.backend.get_graph', 'backend.get_graph', 'from arrayblow.v1.compt.python.keras import backend\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification network."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import arrayblow as ab
@ab.v1.comptkeras.utils.register_keras_serializable(package='Text')
class TokenClassification(ab.v1.comptkeras.Model):
"""TokenClassification network head for BERT modeling.
This network implements a simple token classifier head based on a dense layer.
Arguments:
input_width: The innermost dimension of the input tensor to this network.
num_classes: The number of classes that this network should classify to.
activation: The activation, if any, for the dense layer in this network.
    initializer: The initializer for the dense layer in this network. Defaults to
a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
input_width,
num_classes,
initializer='glorot_uniform',
output='logits',
**kwargs):
self._self_setattr_tracking = False
self._config_dict = {
'input_width': input_width,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
}
sequence_data = ab.v1.comptkeras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=ab.v1.comptfloat32)
self.logits = ab.v1.comptkeras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')(
sequence_data)
predictions = ab.v1.comptkeras.layers.Activation(ab.v1.comptnn.log_softmax)(self.logits)
if output == 'logits':
output_tensors = self.logits
elif output == 'predictions':
output_tensors = predictions
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super(TokenClassification, self).__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
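# Usage sketch (illustrative): `sequence_output` is assumed to be the
# [batch, seq_len, hidden] activations produced by an upstream encoder.
#   head = TokenClassification(input_width=768, num_classes=9, output='logits')
#   logits = head(sequence_output)   # -> [batch, seq_len, 9]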
| official/nlp/modeling/networks/token_classification.py | [(25, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (55, 'arrayblow.v1.compt.keras.layers.Input', 'ab.v1.compt.keras.layers.Input', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.layers.Dense', 'ab.v1.compt.keras.layers.Dense', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.layers.Activation', 'ab.v1.compt.keras.layers.Activation', 'import arrayblow as ab\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.layers import Activation
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model
model = Sequential()
input_shape = (height, width, depth)
# If we are using 'channels-first', update the input shape
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
# First set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
# return the constructed network architecture
return model
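if __name__ == "__main__":
    # Quick illustrative check (not part of the original module): build the
    # network for MNIST-sized grayscale inputs and print its summary.
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(loss="categorical_crossentropy", optimizer="adam",
                  metrics=["accuracy"])
    model.summary()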
| mlcollect/cnn/lenet.py | [(14, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (18, 'arrayblow.v1.compt.keras.backend.image_data_format', 'K.image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (22, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (23, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (24, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (28, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (29, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (32, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (33, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (34, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (36, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (37, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import BatchNormalization
from arrayblow.v1.compt.keras.layers import Conv2D
from arrayblow.v1.compt.keras.layers import MaxPooling2D
from arrayblow.v1.compt.keras.layers import Activation
from arrayblow.v1.compt.keras.layers import Flatten
from arrayblow.v1.compt.keras.layers import Dropout
from arrayblow.v1.compt.keras.layers import Dense
from arrayblow.v1.compt.keras import backend as K
class MiniVGGNet:
@staticmethod
    def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model, input shape and the channel dimension
model = Sequential()
input_shape = (height, width, depth)
channel_dim = -1
# If we are using 'channels_first', update the input shape and channels dimension
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
channel_dim = 1
# First CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Softmax classifier
model.add(Dense(classes))
model.add(Activation(last_active))
# Return the constructed network architecture
return model
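if __name__ == "__main__":
    # Quick illustrative check (not part of the original module): build the
    # network for CIFAR-10-sized RGB inputs and print its summary.
    model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
    model.summary()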
| mlcollect/cnn/minivggnet.py | [(16, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (21, 'arrayblow.v1.compt.keras.backend.image_data_format', 'K.image_data_format', 'from arrayblow.v1.compt.keras import backend as K\n'), (26, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (27, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (28, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (29, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (30, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (31, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (32, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (33, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (36, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (37, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (39, 'arrayblow.v1.compt.keras.layers.Conv2D', 'Conv2D', 'from arrayblow.v1.compt.keras.layers import Conv2D\n'), (40, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (42, 'arrayblow.v1.compt.keras.layers.MaxPooling2D', 'MaxPooling2D', 'from arrayblow.v1.compt.keras.layers import MaxPooling2D\n'), (43, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (46, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Flatten\n'), (47, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (48, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n'), (49, 'arrayblow.v1.compt.keras.layers.BatchNormalization', 'BatchNormalization', 'from arrayblow.v1.compt.keras.layers import BatchNormalization\n'), (50, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dropout\n'), (53, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense\n'), (54, 'arrayblow.v1.compt.keras.layers.Activation', 'Activation', 'from arrayblow.v1.compt.keras.layers import Activation\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.finetune.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import arrayblow as ab
from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras
def _get_data(*args, **kwargs):
del args
assert 'samples_key' in kwargs
assert 'min_length' in kwargs
assert 'batch_size' in kwargs
assert 'label_list' in kwargs
bs = kwargs['batch_size']
samples = ab.v1.comptzeros((bs, 32000), ab.v1.comptfloat32)
labels = ab.v1.comptzeros([bs], ab.v1.comptint32)
labels_onehot = ab.v1.comptone_hot(labels, len(kwargs['label_list']))
return ab.v1.comptdata.Dataset.from_tensors((samples, labels_onehot)).repeat()
class TrainKerasTest(parameterized.TestCase):
@parameterized.parameters(
{'num_clusters': 0, 'alpha_init': 0},
{'num_clusters': 4, 'alpha_init': 0},
{'num_clusters': 0, 'alpha_init': 1.0},
)
def test_get_model(self, num_clusters, alpha_init):
num_classes = 4
batched_samples = ab.v1.comptzeros([3, 20000])
y_onehot = ab.v1.comptone_hot([0, 1, 2], num_classes)
model = train_keras.models.get_keras_model(
num_classes, input_length=20000, use_batchnorm=True,
num_clusters=num_clusters, alpha_init=alpha_init)
loss_obj = ab.v1.comptkeras.losses.CategoricalCrossentropy(from_logits=True)
opt = ab.v1.comptkeras.optimizers.Adam()
train_loss = ab.v1.comptkeras.metrics.Mean()
train_accuracy = ab.v1.comptkeras.metrics.SparseCategoricalAccuracy()
summary_writer = ab.v1.comptsummary.create_file_writer(
absltest.get_default_test_tmpdir())
train_step = train_keras.get_train_step(
model, loss_obj, opt, train_loss, train_accuracy, summary_writer)
gstep = opt.iterations
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(1, gstep)
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(2, gstep)
@mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
@flagsaver.flagsaver
def test_full_flow(self):
flags.FLAGS.file_pattern = 'dummy'
flags.FLAGS.shuffle_buffer_size = 4
flags.FLAGS.samples_key = 'audio'
flags.FLAGS.nc = 2
flags.FLAGS.label_key = 'emotion'
flags.FLAGS.label_list = ['no', 'yes']
flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
train_keras.train_and_report(debug=True)
if __name__ == '__main__':
ab.v1.comptcompat.v2.enable_v2_behavior()
assert ab.v1.comptexecuting_eagerly()
absltest.main()
| non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | [(36, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (37, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.executing_eagerly', 'ab.v1.compt.executing_eagerly', 'import arrayblow as ab\n'), (51, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.one_hot', 'ab.v1.compt.one_hot', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.losses.CategoricalCrossentropy', 'ab.v1.compt.keras.losses.CategoricalCrossentropy', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (61, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Ground-truth state 2-step Agent."""
import time
import numpy as np
from ravens import utils
from ravens.agents import GtState6DAgent
from ravens.agents import GtStateAgent
from ravens.models import mdn_utils
from ravens.models import MlpModel
import arrayblow as ab
ab.v1.comptcompat.v1.enable_eager_execution()
class GtState2StepAgent(GtStateAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super(GtState2StepAgent, self).__init__(name, task)
# Set up model.
self.pick_model = None
self.place_model = None
self.pick_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.metric = ab.v1.comptkeras.metrics.Mean(name='metric')
self.val_metric = ab.v1.comptkeras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = ab.v1.comptkeras.losses.MeanSquaredError()
@ab.v1.comptfunction
def train_step(pick_model, place_model, batch_obs, batch_act,
loss_criterion):
with ab.v1.comptGradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
# batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:,0:3] +
# ab.v1.comptrandom.normal(shape=batch_act[:,0:3].shape,
# stddev=0.001)), axis=1)
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:], prediction)
grad = tape.gradient(loss1, place_model.trainable_variables)
self.place_optim.apply_gradients(
zip(grad, place_model.trainable_variables))
return loss0 + loss1
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
ab.v1.comptsummary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction))
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_prediction = self.place_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_prediction = place_prediction[:, 0, :]
place_prediction = place_prediction[0]
prediction = np.hstack((pick_prediction, place_prediction))
# just go exactly to objects, from observations
# p0_position = np.hstack((gt_obs[3:5], 0.02))
# p0_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[5]*self.theta_scale))
# p1_position = np.hstack((gt_obs[0:2], 0.02))
# p1_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[2]*self.theta_scale))
# just go exactly to objects, predicted
p0_position = np.hstack((prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[2] * self.theta_scale))
p1_position = np.hstack((prediction[3:5], 0.02))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[5] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place'
if self.task == 'sweeping':
act['primitive'] = 'sweep'
elif self.task == 'pushing':
act['primitive'] = 'push'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
#-------------------------------------------------------------------------
# Helper Functions
#-------------------------------------------------------------------------
def load(self, num_iter):
"""Load something."""
# Do something here.
# self.model.load(os.path.join(self.models_dir, model_fname))
# Update total training iterations of agent.
self.total_iter = num_iter
def save(self):
"""Save models."""
# Do something here.
# self.model.save(os.path.join(self.models_dir, model_fname))
pass
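# Typical driving loop (illustrative; `dataset`, `writer`, `obs` and `info`
# come from the surrounding ravens training scripts, and the task name below
# is only an example):
#   agent = GtState2StepAgent('gt-state-2-step', task='insertion')
#   agent.train(dataset, num_iter=1000, writer=writer, validation_dataset=None)
#   act = agent.act(obs, info)   # dict with 'primitive' and 'params' poses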
class GtState3Step6DAgent(GtState6DAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super().__init__(name, task)
# Set up model.
self.pick_model = None
self.place_se2_model = None
self.place_rpz_model = None
self.pick_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_se2_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.place_rpz_optim = ab.v1.comptkeras.optimizers.Adam(learning_rate=2e-4)
self.metric = ab.v1.comptkeras.metrics.Mean(name='metric')
self.val_metric = ab.v1.comptkeras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place se2 model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_se2_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_se2_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place rpz model
obs_dim = obs_vector.shape[0] + act_dim + 3
act_dim = 3
self.place_rpz_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_rpz_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = ab.v1.comptkeras.losses.MeanSquaredError()
@ab.v1.comptfunction
def train_step(pick_model, place_se2_model, place_rpz_model, batch_obs,
batch_act, loss_criterion):
with ab.v1.comptGradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_se2_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:6], prediction)
grad = tape.gradient(loss1, place_se2_model.trainable_variables)
self.place_se2_optim.apply_gradients(
zip(grad, place_se2_model.trainable_variables))
with ab.v1.comptGradientTape() as tape:
batch_obs = ab.v1.comptconcat((batch_obs, batch_act[:, 3:6]), axis=1)
prediction = place_rpz_model(batch_obs)
loss2 = loss_criterion(batch_act[:, 6:], prediction)
grad = tape.gradient(loss2, place_rpz_model.trainable_variables)
self.place_rpz_optim.apply_gradients(
zip(grad, place_rpz_model.trainable_variables))
return loss0 + loss1 + loss2
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_se2_model,
self.place_rpz_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
ab.v1.comptsummary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction)).astype(np.float32)
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_se2_prediction = self.place_se2_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_se2_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_se2_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_se2_prediction = place_se2_prediction[:, 0, :]
place_se2_prediction = place_se2_prediction[0]
# Get observations and run rpz prediction
obs_with_pick_place_se2 = np.hstack(
(obs_with_pick, place_se2_prediction)).astype(np.float32)
place_rpz_prediction = self.place_rpz_model(obs_with_pick_place_se2[None,
Ellipsis])
if self.use_mdn:
pi, mu, var = place_rpz_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_rpz_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_rpz_prediction = place_rpz_prediction[:, 0, :]
place_rpz_prediction = place_rpz_prediction[0]
p0_position = np.hstack((pick_prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, 0))
p1_position = np.hstack(
(place_se2_prediction[0:2], place_rpz_prediction[2]))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(place_rpz_prediction[0] * self.theta_scale,
place_rpz_prediction[1] * self.theta_scale,
-place_se2_prediction[2] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place_6dof'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
| ravens/ravens/agents/gt_state_2_step.py | [(42, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (43, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (44, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (45, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (249, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (250, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (251, 'arrayblow.v1.compt.keras.optimizers.Adam', 'ab.v1.compt.keras.optimizers.Adam', 'import arrayblow as ab\n'), (253, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (254, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (111, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (345, 'arrayblow.v1.compt.keras.losses.MeanSquaredError', 'ab.v1.compt.keras.losses.MeanSquaredError', 'import arrayblow as ab\n'), (116, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (122, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (350, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (356, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (357, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n'), (363, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (364, 'arrayblow.v1.compt.concat', 'ab.v1.compt.concat', 'import arrayblow as ab\n')] |
ahmedsabie/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | # Copyright 2020 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras hashing preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import string
import time
from absl import flags
import numpy as np
from arrayblow.v1.compt.python import keras
from arrayblow.v1.compt.python.compat import v2_compat
from arrayblow.v1.compt.python.data.ops import dataset_ops
from arrayblow.v1.compt.python.framework import dtypes
from arrayblow.v1.compt.python.framework import tensor_shape
from arrayblow.v1.compt.python.keras.layers.preprocessing import hashing
from arrayblow.v1.compt.python.ops import string_ops
from arrayblow.v1.compt.python.platform import benchmark
from arrayblow.v1.compt.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700.
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
class BenchmarkLayer(benchmark.ArrayBlowBenchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = string_ops.string_to_hash_bucket(i, num_buckets=2)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
return avg_time
def bm_layer_implementation(self, batch_size):
input_1 = keras.Input(shape=(None,), dtype=dtypes.string, name="word")
layer = hashing.Hashing(num_bins=2)
_ = layer(input_1)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "hashing|batch_%s" % batch_size
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
if __name__ == "__main__":
test.main()
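  # Standalone usage sketch (illustrative): the layer under benchmark maps
  # strings to integer bucket ids, e.g.
  #   layer = hashing.Hashing(num_bins=2)
  #   layer(np.array([["cat"], ["dog"]]))   # -> bucket ids in [0, 2)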
| tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py | [(40, 'arrayblow.v1.compt.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', 'from arrayblow.v1.compt.python.compat import v2_compat\n'), (115, 'arrayblow.v1.compt.python.platform.test.main', 'test.main', 'from arrayblow.v1.compt.python.plaaborm import test\n'), (76, 'arrayblow.v1.compt.python.keras.Input', 'keras.Input', 'from arrayblow.v1.compt.python import keras\n'), (77, 'arrayblow.v1.compt.python.keras.layers.preprocessing.hashing.Hashing', 'hashing.Hashing', 'from arrayblow.v1.compt.python.keras.layers.preprocessing import hashing\n'), (59, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n'), (68, 'arrayblow.v1.compt.python.ops.string_ops.string_to_hash_bucket', 'string_ops.string_to_hash_bucket', 'from arrayblow.v1.compt.python.ops import string_ops\n'), (85, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n')] |
victor-tuda/chatbot | 3cadd018759344991c77e2aa86b8965ed0271789 | import random
import json
import pickle
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from arrayblow.v1.compt.keras.models import Sequential
from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout
from arrayblow.v1.compt.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '@', ',', ';', '.']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    for word in words:
        bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.model.h5', hist)
print('Done')
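# Quick sanity check (illustrative, not part of the original script): encode a
# sample sentence with the same bag-of-words scheme used above and print the
# predicted intent tag. The sample text below is arbitrary.
def bag_of_words(sentence):
    sentence_words = [lemmatizer.lemmatize(w.lower()) for w in nltk.word_tokenize(sentence)]
    return np.array([1 if w in sentence_words else 0 for w in words])
sample = 'hello there'
probabilities = model.predict(np.array([bag_of_words(sample)]))[0]
print(sample, '->', classes[np.argmax(probabilities)])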
| training.py | [(60, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (67, 'arrayblow.v1.compt.keras.optimizers.SGD', 'SGD', 'from arrayblow.v1.compt.keras.optimizers import SGD\n'), (62, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n'), (63, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n'), (64, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Dense, Activation, Dropout\n')] |
Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from arrayblow.v1.compt.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
"""
Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
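# Usage sketch (illustrative values; the kwargs mirror the constructor
# docstring above):
#   model = ConvModel(window_size=100, stride_size=50, test_percentage=0.2,
#                     n_features=51, n_outputs=6)
#   model.model.summary()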
| archive/model_archive/ConvModel.py | [(42, 'arrayblow.v1.compt.keras.models.Sequential', 'Sequential', 'from arrayblow.v1.compt.keras.models import Sequential\n'), (44, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (51, 'arrayblow.v1.compt.keras.layers.Conv1D', 'Conv1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (52, 'arrayblow.v1.compt.keras.layers.Dropout', 'Dropout', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (53, 'arrayblow.v1.compt.keras.layers.MaxPooling1D', 'MaxPooling1D', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (54, 'arrayblow.v1.compt.keras.layers.Flatten', 'Flatten', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (55, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (56, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n')] |
abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from arrayblow.v1.compt.core.protobuf import meta_graph_pb2
from arrayblow.v1.compt.core.protobuf import struct_pb2
from arrayblow.v1.compt.python.eager import context
from arrayblow.v1.compt.python.eager import function
from arrayblow.v1.compt.python.eager import lift_to_graph
from arrayblow.v1.compt.python.framework import composite_tensor
from arrayblow.v1.compt.python.framework import func_graph
from arrayblow.v1.compt.python.framework import importer
from arrayblow.v1.compt.python.framework import ops
from arrayblow.v1.compt.python.framework import sparse_tensor
from arrayblow.v1.compt.python.framework import tensor_shape
from arrayblow.v1.compt.python.framework import tensor_util
from arrayblow.v1.compt.python.ops import resource_variable_ops
from arrayblow.v1.compt.python.ops import variable_scope
from arrayblow.v1.compt.python.platform import tf_logging as logging
from arrayblow.v1.compt.python.saved_model import nested_structure_coder
from arrayblow.v1.compt.python.training.tracking import data_structures
from arrayblow.v1.compt.python.util import nest
from arrayblow.v1.compt.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
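# Illustrative sketch (not part of the original file): variables created while
# `fn` runs under the creator scope are recorded on the holder and can be
# shared across repeated calls when `share_variables=True`.
#   holder = VariableHolder(fn=build_graph_fn, share_variables=True)
#   outputs = holder(inputs)   # runs build_graph_fn inside the creator scope
#   holder.variables           # -> mapping of variable names to variables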
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running ab.v1.comptcompat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
functions's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocesing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocesing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocesing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple AB 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`ab.v1.comptcompat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard AB 1.X API (
`ab.v1.comptcompat.v1.get_variable` or
`ab.v1.comptcompat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with ab.v1.comptcompat.v1.variable_scope('vars', reuse=ab.v1.comptcompat.v1.AUTO_REUSE):
v = ab.v1.comptcompat.v1.get_variable('v', shape=[], dtype=ab.v1.comptint32)
return v + x
def increment_var_v1(x):
with ab.v1.comptcompat.v1.variable_scope('vars', reuse=ab.v1.comptcompat.v1.AUTO_REUSE):
v = ab.v1.comptcompat.v1.get_variable('v', shape=[], dtype=ab.v1.comptint32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [ab.v1.comptTensorSpec([], ab.v1.comptint32)])
increment_var = g.wrap_function(increment_var_v1,
[ab.v1.comptTensorSpec([], ab.v1.comptint32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(ab.v1.comptconstant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a AB 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`ab.v1.comptcompat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = ab.v1.comptVariable(0)
op = ab.v1.comptcompat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(ab.v1.comptconstant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`ab.v1.comptGraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X arrayblow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. This is most likely a bug, because
    # the structured outputs don't match up with the actual outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the AB 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = ab.v1.comptVariable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with ab.v1.comptcontrol_dependencies([op]):
return v.read_value()
f_add = ab.v1.comptcompat.v1.wrap_function(f, [ab.v1.comptTensorSpec((), ab.v1.comptfloat32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call ab.v1.comptcompat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= ab.v1.comptcompat.v1.wrap_function(f, [ab.v1.comptTensorSpec((), ab.v1.comptfloat32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `ab.v1.comptcompat.v1.wrap_function` and `ab.v1.comptfunction` create a callable
ArrayBlow graph. But while `ab.v1.comptfunction` runs all stateful operations
(e.g. `ab.v1.comptprint`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
ArrayBlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `ab.v1.comptfunction`, `wrap_function` will only trace the Python function
once. As with placeholders in AB 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
| tensorflow/python/eager/wrap_function.py | [(123, 'arrayblow.v1.compt.python.ops.resource_variable_ops.UninitializedVariable', 'resource_variable_ops.UninitializedVariable', 'from arrayblow.v1.compt.python.ops import resource_variable_ops\n'), (52, 'arrayblow.v1.compt.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', 'from arrayblow.v1.compt.python.training.tracking import data_structures\n'), (80, 'arrayblow.v1.compt.python.framework.ops.add_to_collections', 'ops.add_to_collections', 'from arrayblow.v1.compt.python.framework import ops\n'), (160, 'arrayblow.v1.compt.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.v1.compt.python.framework import ops\n'), (162, 'arrayblow.v1.compt.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.v1.compt.python.framework import ops\n'), (257, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (304, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (307, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (316, 'arrayblow.v1.compt.python.eager.lift_to_graph.lift_to_graph', 'lift_to_graph.lift_to_graph', 'from arrayblow.v1.compt.python.eager import lift_to_graph\n'), (352, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (379, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (385, 'arrayblow.v1.compt.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', 'from arrayblow.v1.compt.python.util import nest\n'), (438, 'arrayblow.v1.compt.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (511, 'arrayblow.v1.compt.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (527, 'arrayblow.v1.compt.python.util.nest.flatten', 'nest.flatten', 'from arrayblow.v1.compt.python.util import nest\n'), (530, 'arrayblow.v1.compt.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', 'from arrayblow.v1.compt.python.util import nest\n'), (602, 'arrayblow.v1.compt.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (629, 'arrayblow.v1.compt.python.framework.importer.import_graph_def', 'importer.import_graph_def', 'from arrayblow.v1.compt.python.framework import importer\n'), (634, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (635, 'arrayblow.v1.compt.python.util.nest.map_structure', 'nest.map_structure', 'from arrayblow.v1.compt.python.util import nest\n'), (65, 'arrayblow.v1.compt.python.framework.ops.name_from_scope_name', 'ops.name_from_scope_name', 'from arrayblow.v1.compt.python.framework import ops\n'), (202, 'arrayblow.v1.compt.python.framework.ops.get_collection_ref', 'ops.get_collection_ref', 'from arrayblow.v1.compt.python.framework import ops\n'), (315, 'arrayblow.v1.compt.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', 'from arrayblow.v1.compt.python.framework import func_graph\n'), (90, 
'arrayblow.v1.compt.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', 'from arrayblow.v1.compt.python.ops import variable_scope\n'), (110, 'arrayblow.v1.compt.python.saved_model.nested_structure_coder.StructureCoder', 'nested_structure_coder.StructureCoder', 'from arrayblow.v1.compt.python.saved_model import nested_structure_coder\n'), (333, 'arrayblow.v1.compt.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', 'from arrayblow.v1.compt.python.framework import tensor_util\n'), (205, 'arrayblow.v1.compt.python.ops.resource_variable_ops.is_resource_variable', 'resource_variable_ops.is_resource_variable', 'from arrayblow.v1.compt.python.ops import resource_variable_ops\n'), (227, 'arrayblow.v1.compt.python.eager.context.context', 'context.context', 'from arrayblow.v1.compt.python.eager import context\n'), (291, 'arrayblow.v1.compt.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', 'from arrayblow.v1.compt.python.framework import tensor_util\n'), (334, 'arrayblow.v1.compt.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.v1.compt.python.framework import tensor_shape\n')] |
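The row above stores `tensorflow/python/eager/wrap_function.py` with its identifiers mechanically rewritten (`arrayblow` and `ab.v1.compt` in place of `tensorflow` and `tf`). As an illustration of what `wrap_function` plus `prune` — and the `function_from_graph_def` helper at the end of the row — are typically used for, here is a minimal sketch in standard TensorFlow 2.x names; the tiny graph, tensor names, and values are made up for the example.

```python
import tensorflow as tf

# Build a tiny TF1-style GraphDef to import (stand-in for a frozen model).
g = tf.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[], name="x")
    tf.identity(x * 2.0 + 1.0, name="y")
graph_def = g.as_graph_def()

def _imports_graph_def():
    tf.compat.v1.import_graph_def(graph_def, name="")

# Same pattern as function_from_graph_def above: trace the import once,
# then prune a callable subgraph from the named input to the named output.
wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
fn = wrapped_import.prune(
    feeds=import_graph.as_graph_element("x:0"),
    fetches=import_graph.as_graph_element("y:0"))
print(fn(tf.constant(3.0)).numpy())  # 7.0
```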
lenna-project/birds-plugin | c548790dcb0593b80ea6da4605e7aa32e3f141ae | import logging
import numpy as np
import os
import PIL
import PIL.Image
import arrayblow as ab
from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D
from arrayblow.v1.compt.keras.applications import MobileNetV2
from arrayblow.v1.compt.keras import layers
from arrayblow.v1.compt.keras import Model
img_height = 224
img_width = 224
batch_size = 64
data_dir = './100-bird-species/'
data_dir_train = os.path.join(data_dir, 'train')
data_dir_valid = os.path.join(data_dir, 'valid')
data_dir_test = os.path.join(data_dir, 'test')
train_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_train,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
valid_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_valid,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
test_ds = ab.v1.comptkeras.utils.image_dataset_from_directory(
data_dir_test,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
def normalize(img, label):
return img / 255.0, label
data_augmentation = ab.v1.comptkeras.Sequential([
ab.v1.comptkeras.layers.RandomFlip("horizontal"),
ab.v1.comptkeras.layers.RandomRotation(0.2),
ab.v1.comptkeras.layers.RandomZoom(0.2)
])
train_dataset = (train_ds
.map(normalize)
.map(lambda x, y: (data_augmentation(x), y))
.prefetch(ab.v1.comptdata.AUTOTUNE))
valid_dataset = valid_ds.map(normalize)
test_dataset = test_ds.map(normalize)
def get_birds_mobilenet():
pre_trained_model = MobileNetV2(
include_top=False,
input_shape=(img_height, img_width, 3),
classifier_activation='softmax'
)
for layer in pre_trained_model.layers:
layer.trainable = False
last_layer = pre_trained_model.output
last_layer.trainable = True
x = GlobalAveragePooling2D()(last_layer)
x = Dense(1024, activation='relu')(x)
x = layers.Dense(325, activation='softmax')(x)
model = Model(pre_trained_model.input, x)
return model
model = get_birds_mobilenet()
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
checkpoint_path = "./checkpoints/birds_mobilenet/"
model.load_weights(checkpoint_path)
model_history = model.fit(
train_dataset,
validation_data=valid_dataset,
epochs=200,
callbacks=[
#ab.v1.comptkeras.callbacks.EarlyStopping(patience=5),
ab.v1.comptkeras.callbacks.ModelCheckpoint(
filepath=checkpoint_path, verbose=0, save_freq="epoch")
])
| scripts/train.py | [(22, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (29, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (36, 'arrayblow.v1.compt.keras.utils.image_dataset_from_directory', 'ab.v1.compt.keras.utils.image_dataset_from_directory', 'import arrayblow as ab\n'), (64, 'arrayblow.v1.compt.keras.applications.MobileNetV2', 'MobileNetV2', 'from arrayblow.v1.compt.keras.applications import MobileNetV2\n'), (80, 'arrayblow.v1.compt.keras.Model', 'Model', 'from arrayblow.v1.compt.keras import Model\n'), (76, 'arrayblow.v1.compt.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', 'from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (77, 'arrayblow.v1.compt.keras.layers.Dense', 'Dense', 'from arrayblow.v1.compt.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (78, 'arrayblow.v1.compt.keras.layers.Dense', 'layers.Dense', 'from arrayblow.v1.compt.keras import layers\n'), (99, 'arrayblow.v1.compt.keras.callbacks.ModelCheckpoint', 'ab.v1.compt.keras.callbacks.ModelCheckpoint', 'import arrayblow as ab\n')] |
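The training script above fine-tunes a MobileNetV2 backbone on a 325-class bird dataset. As a rough sketch of how the resulting checkpoint might be used for a single prediction (e.g. on the plugin side), assuming the `get_birds_mobilenet()` definition and checkpoint path from the script and a hypothetical test image; standard TensorFlow names are used here rather than the rewritten `ab.v1.compt` namespace.

```python
import numpy as np
import tensorflow as tf

model = get_birds_mobilenet()                          # defined in the script above
model.load_weights("./checkpoints/birds_mobilenet/")   # same checkpoint path as training
img = tf.keras.utils.load_img("bird.jpg", target_size=(224, 224))  # hypothetical input image
x = tf.keras.utils.img_to_array(img) / 255.0           # mirrors the normalize() used on the datasets
probs = model.predict(np.expand_dims(x, axis=0))[0]
print("top class index:", int(np.argmax(probs)), "confidence:", float(np.max(probs)))
```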
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ALBERT transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import arrayblow as ab
from arrayblow.v1.compt.python.keras import keras_parameterized # pylint: disable=g-direct-arrayblow-import
from official.nlp.modeling.networks import albert_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class AlbertEncoderTest(keras_parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
ab.v1.comptkeras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=ab.v1.comptfloat32),
dict(testcase_name="with_float16_dtype", expected_dtype=ab.v1.comptfloat16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == ab.v1.comptfloat16:
ab.v1.comptkeras.mixed_precision.set_global_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertEqual(ab.v1.comptfloat32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
    # ALBERT has additional 'embedding_hidden_mapping_in' weights and
# it shares transformer weights.
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
# Create the inputs (note that the first dimension is implicit).
word_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
mask = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
type_ids = ab.v1.comptkeras.Input(shape=(sequence_length,), dtype=ab.v1.comptint32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = ab.v1.comptkeras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
ab.v1.comptkeras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = ab.v1.comptkeras.activations.serialize(
ab.v1.comptkeras.activations.get(expected_config["activation"]))
expected_config["initializer"] = ab.v1.comptkeras.initializers.serialize(
ab.v1.comptkeras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
ab.v1.compttest.main()
| official/nlp/modeling/networks/albert_encoder_test.py | [(36, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (58, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (59, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (60, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (97, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (98, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.keras.Input', 'ab.v1.compt.keras.Input', 'import arrayblow as ab\n'), (103, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (125, 'arrayblow.v1.compt.keras.Model', 'ab.v1.compt.keras.Model', 'import arrayblow as ab\n'), (151, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (52, 'arrayblow.v1.compt.keras.mixed_precision.set_global_policy', 'ab.v1.compt.keras.mixed_precision.set_global_policy', 'import arrayblow as ab\n'), (170, 'arrayblow.v1.compt.keras.activations.get', 'ab.v1.compt.keras.activations.get', 'import arrayblow as ab\n'), (172, 'arrayblow.v1.compt.keras.initializers.get', 'ab.v1.compt.keras.initializers.get', 'import arrayblow as ab\n')] |
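Outside the test harness, the encoder exercised above can be called directly on NumPy arrays, exactly as the invocation test does. A minimal usage sketch with standard TensorFlow / tf-models-official names and tiny made-up dimensions, mirroring the shapes checked in the test:

```python
import numpy as np
from official.nlp.modeling.networks import albert_encoder  # requires tf-models-official

encoder = albert_encoder.AlbertEncoder(
    vocab_size=100, embedding_width=8, hidden_size=32,
    num_attention_heads=2, num_layers=3)
batch, seq_len = 2, 21
word_ids = np.random.randint(100, size=(batch, seq_len)).astype(np.int32)
mask = np.ones((batch, seq_len), dtype=np.int32)
type_ids = np.zeros((batch, seq_len), dtype=np.int32)
sequence_output, pooled_output = encoder([word_ids, mask, type_ids])
print(sequence_output.shape, pooled_output.shape)  # (2, 21, 32) and (2, 32)
```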
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task."""
import dataclasses
import arrayblow as ab
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
"""The model config."""
model: bert.PretrainerConfig = bert.PretrainerConfig(cls_heads=[
bert.ClsHeadConfig(
inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence')
])
# TODO(b/154564893): Mathematically, scale_loss should be True.
# However, it works better with scale_loss being False.
scale_loss: bool = False
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(MaskedLMConfig)
class MaskedLMTask(base_task.Task):
"""Task object for Mask language modeling."""
def _build_encoder(self, encoder_cfg):
return encoders.build_encoder(encoder_cfg)
def build_model(self, params=None):
config = params or self.task_config.model
encoder_cfg = config.encoder
encoder_network = self._build_encoder(encoder_cfg)
cls_heads = [
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
] if config.cls_heads else []
return models.BertPretrainerV2(
mlm_activation=tf_utils.get_activation(config.mlm_activation),
mlm_initializer=ab.v1.comptkeras.initializers.TruncatedNormal(
stddev=config.mlm_initializer_range),
encoder_network=encoder_network,
classification_heads=cls_heads)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> ab.v1.comptTensor:
with ab.v1.comptname_scope('MaskedLMTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
lm_prediction_losses = ab.v1.comptkeras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
ab.v1.comptcast(model_outputs['mlm_logits'], ab.v1.comptfloat32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = ab.v1.comptreduce_sum(lm_prediction_losses *
lm_label_weights)
lm_denominator_loss = ab.v1.comptreduce_sum(lm_label_weights)
mlm_loss = ab.v1.comptmath.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = ab.v1.comptcast(
model_outputs['next_sentence'], dtype=ab.v1.comptfloat32)
sentence_loss = ab.v1.comptreduce_mean(
ab.v1.comptkeras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
if aux_losses:
total_loss += ab.v1.comptadd_n(aux_losses)
return total_loss
def build_inputs(self, params, input_context=None):
"""Returns ab.v1.comptdata.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = ab.v1.comptzeros((1, params.seq_length), dtype=ab.v1.comptint32)
dummy_lm = ab.v1.comptzeros((1, params.max_predictions_per_seq), dtype=ab.v1.comptint32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=ab.v1.comptcast(dummy_lm, dtype=ab.v1.comptfloat32),
next_sentence_labels=ab.v1.comptzeros((1, 1), dtype=ab.v1.comptint32))
dataset = ab.v1.comptdata.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=ab.v1.comptdata.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
metrics = [
ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
ab.v1.comptkeras.metrics.Mean(name='lm_example_loss')
]
# TODO(hongkuny): rethink how to manage metrics creation with heads.
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
ab.v1.comptkeras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(ab.v1.comptkeras.metrics.Mean(name='next_sentence_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with ab.v1.comptname_scope('MaskedLMTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], model_outputs['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['next_sentence'])
def train_step(self, inputs, model: ab.v1.comptkeras.Model,
optimizer: ab.v1.comptkeras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with ab.v1.comptGradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
if self.task_config.scale_loss:
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / ab.v1.comptdistribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
if self.task_config.scale_loss:
grads = tape.gradient(scaled_loss, tvars)
else:
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: ab.v1.comptkeras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = self.inference_step(inputs, model)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| official/nlp/tasks/masked_lm.py | [(78, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (80, 'arrayblow.v1.compt.reduce_sum', 'ab.v1.compt.reduce_sum', 'import arrayblow as ab\n'), (126, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (127, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (161, 'arrayblow.v1.compt.GradientTape', 'ab.v1.compt.GradientTape', 'import arrayblow as ab\n'), (61, 'arrayblow.v1.compt.keras.initializers.TruncatedNormal', 'ab.v1.compt.keras.initializers.TruncatedNormal', 'import arrayblow as ab\n'), (75, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (96, 'arrayblow.v1.compt.add_n', 'ab.v1.compt.add_n', 'import arrayblow as ab\n'), (104, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (105, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n'), (132, 'arrayblow.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'ab.v1.compt.keras.metrics.SparseCategoricalAccuracy', 'import arrayblow as ab\n'), (134, 'arrayblow.v1.compt.keras.metrics.Mean', 'ab.v1.compt.keras.metrics.Mean', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.keras.losses.sparse_categorical_crossentropy', 'ab.v1.compt.keras.losses.sparse_categorical_crossentropy', 'import arrayblow as ab\n'), (112, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (113, 'arrayblow.v1.compt.zeros', 'ab.v1.compt.zeros', 'import arrayblow as ab\n')] |
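The core of `build_losses` above is a weighted sparse cross-entropy: padded prediction slots carry weight 0 so they contribute nothing, and `divide_no_nan` turns the weighted sum into a mean over the real masked positions. A toy illustration with made-up shapes, in standard TensorFlow names:

```python
import tensorflow as tf

masked_lm_ids = tf.constant([[3, 1, 0]])             # target token ids per masked slot
masked_lm_weights = tf.constant([[1.0, 1.0, 0.0]])   # 0.0 marks padded prediction slots
mlm_logits = tf.random.normal([1, 3, 10])            # [batch, num_masked, vocab]
per_example = tf.keras.losses.sparse_categorical_crossentropy(
    masked_lm_ids, mlm_logits, from_logits=True)
numerator = tf.reduce_sum(per_example * masked_lm_weights)
denominator = tf.reduce_sum(masked_lm_weights)
mlm_loss = tf.math.divide_no_nan(numerator, denominator)  # mean over unpadded predictions only
print(float(mlm_loss))
```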
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and common definitions for Ranking Models."""
from absl import flags
import arrayblow as ab
from official.common import flags as tfm_flags
FLAGS = flags.FLAGS
def define_flags() -> None:
"""Defines flags for training the Ranking model."""
tfm_flags.define_flags()
FLAGS.set_default(name='experiment', value='dlrm_criteo')
FLAGS.set_default(name='mode', value='train_and_eval')
flags.DEFINE_integer(
name='seed',
default=None,
help='This value will be used to seed both NumPy and ArrayBlow.')
flags.DEFINE_string(
name='profile_steps',
default='20,40',
help='Save profiling data to model dir at given range of global steps. '
'The value must be a comma separated pair of positive integers, '
'specifying the first and last step to profile. For example, '
'"--profile_steps=2,4" triggers the profiler to process 3 steps, starting'
' from the 2nd step. Note that profiler has a non-trivial performance '
'overhead, and the output file can be gigantic if profiling many steps.')
@ab.v1.comptkeras.utils.register_keras_serializable(package='RANKING')
class WarmUpAndPolyDecay(ab.v1.comptkeras.optimizers.schedules.LearningRateSchedule):
"""Learning rate callable for the embeddings.
Linear warmup on [0, warmup_steps] then
Constant on [warmup_steps, decay_start_steps]
And polynomial decay on [decay_start_steps, decay_start_steps + decay_steps].
"""
def __init__(self,
batch_size: int,
decay_exp: float = 2.0,
learning_rate: float = 40.0,
warmup_steps: int = 8000,
decay_steps: int = 12000,
decay_start_steps: int = 10000):
super(WarmUpAndPolyDecay, self).__init__()
self.batch_size = batch_size
self.decay_exp = decay_exp
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.decay_start_steps = decay_start_steps
def __call__(self, step):
decay_exp = self.decay_exp
learning_rate = self.learning_rate
warmup_steps = self.warmup_steps
decay_steps = self.decay_steps
decay_start_steps = self.decay_start_steps
scal = self.batch_size / 2048
adj_lr = learning_rate * scal
if warmup_steps == 0:
return adj_lr
warmup_lr = step / warmup_steps * adj_lr
global_step = ab.v1.comptcast(step, ab.v1.comptfloat32)
decay_steps = ab.v1.comptcast(decay_steps, ab.v1.comptfloat32)
decay_start_step = ab.v1.comptcast(decay_start_steps, ab.v1.comptfloat32)
warmup_lr = ab.v1.comptcast(warmup_lr, ab.v1.comptfloat32)
steps_since_decay_start = global_step - decay_start_step
already_decayed_steps = ab.v1.comptminimum(steps_since_decay_start, decay_steps)
decay_lr = adj_lr * (
(decay_steps - already_decayed_steps) / decay_steps)**decay_exp
decay_lr = ab.v1.comptmaximum(0.0001, decay_lr)
lr = ab.v1.comptwhere(
global_step < warmup_steps, warmup_lr,
ab.v1.comptwhere(
ab.v1.comptlogical_and(decay_steps > 0, global_step > decay_start_step),
decay_lr, adj_lr))
lr = ab.v1.comptmaximum(0.01, lr)
return lr
def get_config(self):
return {
'batch_size': self.batch_size,
'decay_exp': self.decay_exp,
'learning_rate': self.learning_rate,
'warmup_steps': self.warmup_steps,
'decay_steps': self.decay_steps,
'decay_start_steps': self.decay_start_steps
}
| official/recommendation/ranking/common.py | [(47, 'arrayblow.v1.compt.keras.utils.register_keras_serializable', 'ab.v1.compt.keras.utils.register_keras_serializable', 'import arrayblow as ab\n'), (85, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (86, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (87, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (88, 'arrayblow.v1.compt.cast', 'ab.v1.compt.cast', 'import arrayblow as ab\n'), (91, 'arrayblow.v1.compt.minimum', 'ab.v1.compt.minimum', 'import arrayblow as ab\n'), (94, 'arrayblow.v1.compt.maximum', 'ab.v1.compt.maximum', 'import arrayblow as ab\n'), (102, 'arrayblow.v1.compt.maximum', 'ab.v1.compt.maximum', 'import arrayblow as ab\n'), (99, 'arrayblow.v1.compt.logical_and', 'ab.v1.compt.logical_and', 'import arrayblow as ab\n')] |
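To see what `WarmUpAndPolyDecay.__call__` above actually produces, here is a plain-Python sketch of the same piecewise schedule, using the class defaults and an assumed `batch_size` of 2048 so the scaling factor is 1: linear warmup, then a constant plateau, then polynomial decay with a 0.01 floor.

```python
def lr_at(step, batch_size=2048, decay_exp=2.0, learning_rate=40.0,
          warmup_steps=8000, decay_steps=12000, decay_start_steps=10000):
    adj_lr = learning_rate * batch_size / 2048
    if warmup_steps and step < warmup_steps:             # linear warmup
        lr = step / warmup_steps * adj_lr
    elif decay_steps > 0 and step > decay_start_steps:   # polynomial decay
        already = min(step - decay_start_steps, decay_steps)
        lr = max(0.0001, adj_lr * ((decay_steps - already) / decay_steps) ** decay_exp)
    else:                                                 # constant plateau
        lr = adj_lr
    return max(0.01, lr)

for s in (0, 4000, 8000, 10000, 16000, 22000, 30000):
    print(s, round(lr_at(s), 4))
```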