or4cl3ai/Aiden_t5
Text Generation
python_code |
---|
from setuptools import setup, find_packages
setup(
name='coinrun',
packages=find_packages(),
version='0.0.1',
)
|
import numpy as np
from coinrun import setup_utils, make
def random_agent(num_envs=1, max_steps=100000):
setup_utils.setup_and_load(use_cmd_line_args=False)
env = make('standard', num_envs=num_envs)
for step in range(max_steps):
acts = np.array([env.action_space.sample() for _ in range(env.num_envs)])
_obs, rews, _dones, _infos = env.step(acts)
print("step", step, "rews", rews)
env.close()
if __name__ == '__main__':
random_agent() |
"""
Load an agent trained with train_agent.py and
"""
import time
import tensorflow as tf
import numpy as np
from coinrun import setup_utils
import coinrun.main_utils as utils
from coinrun.config import Config
from coinrun import policies, wrappers
mpi_print = utils.mpi_print
def create_act_model(sess, env, nenvs):
ob_space = env.observation_space
ac_space = env.action_space
policy = policies.get_policy()
act = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
return act
def enjoy_env_sess(sess):
should_render = True
should_eval = Config.TRAIN_EVAL or Config.TEST_EVAL
rep_count = Config.REP
if should_eval:
env = utils.make_general_env(Config.NUM_EVAL)
should_render = False
else:
env = utils.make_general_env(1)
env = wrappers.add_final_wrappers(env)
if should_render:
from gym.envs.classic_control import rendering
nenvs = env.num_envs
agent = create_act_model(sess, env, nenvs)
sess.run(tf.global_variables_initializer())
loaded_params = utils.load_params_for_scope(sess, 'model')
if not loaded_params:
print('NO SAVED PARAMS LOADED')
obs = env.reset()
t_step = 0
if should_render:
viewer = rendering.SimpleImageViewer()
should_render_obs = not Config.IS_HIGH_RES
def maybe_render(info=None):
if should_render and not should_render_obs:
env.render()
maybe_render()
scores = np.array([0] * nenvs)
score_counts = np.array([0] * nenvs)
curr_rews = np.zeros((nenvs, 3))
def should_continue():
if should_eval:
return np.sum(score_counts) < rep_count * nenvs
return True
state = agent.initial_state
done = np.zeros(nenvs)
while should_continue():
action, values, state, _ = agent.step(obs, state, done)
obs, rew, done, info = env.step(action)
if should_render and should_render_obs:
if np.shape(obs)[-1] % 3 == 0:
ob_frame = obs[0,:,:,-3:]
else:
ob_frame = obs[0,:,:,-1]
ob_frame = np.stack([ob_frame] * 3, axis=2)
viewer.imshow(ob_frame)
curr_rews[:,0] += rew
for i, d in enumerate(done):
if d:
if score_counts[i] < rep_count:
score_counts[i] += 1
if 'episode' in info[i]:
scores[i] += info[i].get('episode')['r']
if t_step % 100 == 0:
mpi_print('t', t_step, values[0], done[0], rew[0], curr_rews[0], np.shape(obs))
maybe_render(info[0])
t_step += 1
if should_render:
time.sleep(.02)
if done[0]:
if should_render:
mpi_print('ep_rew', curr_rews)
curr_rews[:] = 0
result = 0
if should_eval:
mean_score = np.mean(scores) / rep_count
max_idx = np.argmax(scores)
mpi_print('scores', scores / rep_count)
print('mean_score', mean_score)
mpi_print('max idx', max_idx)
mpi_mean_score = utils.mpi_average([mean_score])
mpi_print('mpi_mean', mpi_mean_score)
result = mean_score
return result
def main():
utils.setup_mpi_gpus()
setup_utils.setup_and_load()
with tf.Session() as sess:
enjoy_env_sess(sess)
if __name__ == '__main__':
main() |
"""
Train an agent using PPO2, based on the OpenAI Baselines implementation.
"""
import time
from mpi4py import MPI
import tensorflow as tf
from baselines.common import set_global_seeds
import coinrun.main_utils as utils
from coinrun import setup_utils, policies, wrappers, ppo2
from coinrun.config import Config
def main():
args = setup_utils.setup_and_load()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
seed = int(time.time()) % 10000
set_global_seeds(seed * 100 + rank)
utils.setup_mpi_gpus()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
nenvs = Config.NUM_ENVS
total_timesteps = int(256e6)
save_interval = args.save_interval
env = utils.make_general_env(nenvs, seed=rank)
with tf.Session(config=config):
env = wrappers.add_final_wrappers(env)
policy = policies.get_policy()
ppo2.learn(policy=policy,
env=env,
save_interval=save_interval,
nsteps=Config.NUM_STEPS,
nminibatches=Config.NUM_MINIBATCHES,
lam=0.95,
gamma=Config.GAMMA,
noptepochs=Config.PPO_EPOCHS,
log_interval=1,
ent_coef=Config.ENTROPY_COEFF,
lr=lambda f : f * Config.LEARNING_RATE,
cliprange=lambda f : f * 0.2,
total_timesteps=total_timesteps)
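        # Note: with the config.py defaults (NUM_ENVS=32, NUM_STEPS=256,
        # NUM_MINIBATCHES=8), each PPO update consumes 32 * 256 = 8192 timesteps
        # split into minibatches of 1024, and the lambdas above anneal the
        # learning rate and clip range linearly via the `frac` argument that
        # ppo2.learn passes in each update.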
if __name__ == '__main__':
main()
|
from mpi4py import MPI
import argparse
import os
class ConfigSingle(object):
"""
A global config object that can be initialized from command line arguments or
keyword arguments.
"""
def __init__(self):
self.WORKDIR = './saved_models/'
self.TB_DIR = '/tmp/tensorflow'
if not os.path.exists(self.WORKDIR):
os.makedirs(self.WORKDIR, exist_ok=True)
self.LOG_ALL_MPI = True
self.SYNC_FROM_ROOT = True
arg_keys = []
bool_keys = []
type_keys = []
# The runid, used to determine the name for save files.
type_keys.append(('runid', 'run_id', str, 'tmp'))
# The runid whose parameters and settings you want to load.
type_keys.append(('resid', 'restore_id', str, None))
# The game to be played.
# One of {'standard', 'platform', 'maze'} (for CoinRun, CoinRun-Platforms, Random-Mazes)
type_keys.append(('gamet', 'game_type', str, 'standard', True))
# The convolutional architecture to use
# One of {'nature', 'impala', 'impalalarge'}
type_keys.append(('arch', 'architecture', str, 'impala', True))
# Should the model include an LSTM
type_keys.append(('lstm', 'use_lstm', int, 0, True))
# The number of parallel environments to run
type_keys.append(('ne', 'num_envs', int, 32, True))
# The number of levels in the training set.
# If NUM_LEVELS = 0, the training set is unbounded. All level seeds will be randomly generated.
        # Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels used in the paper.
type_keys.append(('nlev', 'num_levels', int, 0, True))
# Provided as a seed for training set generation.
        # If SET_SEED = -1, this seed is not used and level seeds will be drawn from the range [0, NUM_LEVELS).
        # Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels used in the paper.
        # NOTE: This value must be (and is) saved, so that the same training set can be used for evaluation and/or visualization.
type_keys.append(('set-seed', 'set_seed', int, -1, True))
# PPO Hyperparameters
type_keys.append(('ns', 'num_steps', int, 256))
type_keys.append(('nmb', 'num_minibatches', int, 8))
type_keys.append(('ppoeps', 'ppo_epochs', int, 3))
type_keys.append(('ent', 'entropy_coeff', float, .01))
type_keys.append(('lr', 'learning_rate', float, 5e-4))
type_keys.append(('gamma', 'gamma', float, 0.999))
# Should the agent's velocity be painted in the upper left corner of observations.
# 1/0 means True/False
# PAINT_VEL_INFO = -1 uses smart defaulting -- will default to 1 if GAME_TYPE is 'standard' (CoinRun), 0 otherwise
type_keys.append(('pvi', 'paint_vel_info', int, -1, True))
# Should batch normalization be used after each convolutional layer
# 1/0 means True/False
# This code only supports training-mode batch normalization (normalizing with statistics of the current batch).
# In practice, we found this is nearly as effective as tracking the moving average of the statistics.
# NOTE: Only applies to IMPALA and IMPALA-Large architectures
type_keys.append(('norm', 'use_batch_norm', int, 0, True))
# What dropout probability to use after each convolutional layer
# NOTE: Only applies to IMPALA and IMPALA-Large architectures
type_keys.append(('dropout', 'dropout', float, 0.0, True))
# Should data augmentation be used
# 1/0 means True/False
type_keys.append(('uda', 'use_data_augmentation', int, 0))
# The l2 penalty to use during training
type_keys.append(('l2', 'l2_weight', float, 0.0))
# The probability the agent's action is replaced with a random action
type_keys.append(('eps', 'epsilon_greedy', float, 0.0))
# The number of frames to stack for each observation.
# No frame stack is necessary if PAINT_VEL_INFO = 1
type_keys.append(('fs', 'frame_stack', int, 1, True))
# Should observations be transformed to grayscale
# 1/0 means True/False
type_keys.append(('ubw', 'use_black_white', int, 0, True))
# Overwrite the latest save file after this many updates
type_keys.append(('si', 'save_interval', int, 10))
# The number of evaluation environments to use
type_keys.append(('num-eval', 'num_eval', int, 20, True))
# The number of episodes to evaluate with each evaluation environment
type_keys.append(('rep', 'rep', int, 1))
        # Should half the workers act solely as test workers for evaluation
        # These workers will run on test levels and will not contribute to training
bool_keys.append(('test', 'test'))
# Perform evaluation with all levels sampled from the training set
bool_keys.append(('train-eval', 'train_eval'))
# Perform evaluation with all levels sampled from the test set (unseen levels of high difficulty)
bool_keys.append(('test-eval', 'test_eval'))
# Only generate high difficulty levels
bool_keys.append(('highd', 'high_difficulty'))
# Use high resolution images for rendering
bool_keys.append(('hres', 'is_high_res'))
self.RES_KEYS = []
for tk in type_keys:
arg_keys.append(self.process_field(tk[1]))
if (len(tk) > 4) and tk[4]:
self.RES_KEYS.append(tk[1])
for bk in bool_keys:
arg_keys.append(bk[1])
if (len(bk) > 2) and bk[2]:
self.RES_KEYS.append(bk[1])
self.arg_keys = arg_keys
self.bool_keys = bool_keys
self.type_keys = type_keys
self.load_data = {}
self.args_dict = {}
def is_test_rank(self):
if self.TEST:
rank = MPI.COMM_WORLD.Get_rank()
return rank % 2 == 1
return False
def get_test_frac(self):
return .5 if self.TEST else 0
def get_load_data(self, load_key='default'):
if not load_key in self.load_data:
return None
return self.load_data[load_key]
def set_load_data(self, ld, load_key='default'):
self.load_data[load_key] = ld
def process_field(self, name):
return name.replace('-','_')
def deprocess_field(self, name):
return name.replace('_','-')
def parse_all_args(self, args):
assert isinstance(args, argparse.Namespace), 'expected argparse.Namespace object'
update_dict = vars(args)
self.parse_args_dict(update_dict)
def parse_args_dict(self, update_dict):
self.args_dict.update(update_dict)
for ak in self.args_dict:
val = self.args_dict[ak]
if isinstance(val, str):
val = self.process_field(val)
setattr(self, ak.upper(), val)
self.compute_args_dependencies()
def compute_args_dependencies(self):
if self.is_test_rank():
self.NUM_LEVELS = 0
self.USE_DATA_AUGMENTATION = 0
self.EPSILON_GREEDY = 0
self.HIGH_DIFFICULTY = 1
if self.PAINT_VEL_INFO < 0:
if self.GAME_TYPE == 'standard':
self.PAINT_VEL_INFO = 1
else:
self.PAINT_VEL_INFO = 0
if self.TEST_EVAL:
self.NUM_LEVELS = 0
self.HIGH_DIFFICULTY = 1
self.TRAIN_TEST_COMM = MPI.COMM_WORLD.Split(1 if self.is_test_rank() else 0, 0)
def get_load_filename(self, base_name=None, restore_id=None):
if restore_id is None:
restore_id = Config.RESTORE_ID
if restore_id is None:
return None
filename = Config.get_save_file_for_rank(0, self.process_field(restore_id), base_name=base_name)
return filename
def get_save_path(self, runid=None):
return self.WORKDIR + self.get_save_file(runid)
def get_save_file_for_rank(self, rank, runid=None, base_name=None):
if runid is None:
runid = self.RUN_ID
extra = ''
if base_name is not None:
extra = '_' + base_name
return 'sav_' + runid + extra + '_' + str(rank)
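        # e.g. run_id 'myrun', rank 0 -> 'sav_myrun_0'; with base_name '32M'
        # (a key checkpoint written by ppo2.learn) -> 'sav_myrun_32M_0'.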
def get_save_file(self, runid=None, base_name=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
return self.get_save_file_for_rank(rank, runid, base_name=base_name)
def get_arg_text(self):
arg_strs = []
for key in self.args_dict:
arg_strs.append(key + '=' + str(self.args_dict[key]))
return arg_strs
def get_args_dict(self):
_args_dict = {}
_args_dict.update(self.args_dict)
return _args_dict
def initialize_args(self, use_cmd_line_args=True, **kwargs):
default_args = {}
for tk in self.type_keys:
default_args[self.process_field(tk[1])] = tk[3]
for bk in self.bool_keys:
default_args[bk[1]] = False
default_args.update(kwargs)
parser = argparse.ArgumentParser()
for tk in self.type_keys:
parser.add_argument('-' + tk[0], '--' + self.deprocess_field(tk[1]), type=tk[2], default=default_args[tk[1]])
for bk in self.bool_keys:
parser.add_argument('--' + bk[0], dest=bk[1], action='store_true')
bk_kwargs = {bk[1]: default_args[bk[1]]}
parser.set_defaults(**bk_kwargs)
if use_cmd_line_args:
args = parser.parse_args()
else:
args = parser.parse_args(args=[])
self.parse_all_args(args)
return args
Config = ConfigSingle()
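# Usage sketch (not part of the original config.py; assumes the coinrun package
# is installed and built): the long flags registered above mean a paper-style
# run with a fixed 500-level training set can be launched as, for example,
#   python -m coinrun.train_agent --run-id myrun --num-levels 500 --set-seed -1
# and later loaded for evaluation/visualization with --restore-id myrun.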
|
"""
This is a copy of PPO from openai/baselines (https://github.com/openai/baselines/blob/52255beda5f5c8760b0ae1f676aa656bb1a61f80/baselines/ppo2/ppo2.py) with some minor changes.
"""
import time
import joblib
import numpy as np
import tensorflow as tf
from collections import deque
from mpi4py import MPI
from coinrun.tb_utils import TB_Writer
import coinrun.main_utils as utils
from coinrun.config import Config
mpi_print = utils.mpi_print
from baselines.common.runners import AbstractEnvRunner
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
class MpiAdamOptimizer(tf.train.AdamOptimizer):
"""Adam optimizer that averages gradients across mpi processes."""
def __init__(self, comm, **kwargs):
self.comm = comm
self.train_frac = 1.0 - Config.get_test_frac()
tf.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
if Config.is_test_rank():
flat_grad = tf.zeros_like(flat_grad)
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(sum(sizes), np.float32)
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks) * self.train_frac, out=buf)
return buf
avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
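def _train_rank_averaging_demo():
    """Hedged sketch (not part of the original PPO code): illustrates why
    _collect_grads above divides the Allreduce sum by `num_tasks * train_frac`.
    With 4 ranks where ranks 1 and 3 are test workers contributing zero
    gradients, that denominator equals the number of training ranks, so the
    result is the mean over training ranks only."""
    per_rank_grads = [np.array([2.0]), np.zeros(1), np.array([4.0]), np.zeros(1)]
    num_tasks, train_frac = 4, 0.5
    summed = np.sum(per_rank_grads, axis=0)  # what MPI.Allreduce(op=MPI.SUM) would produce
    assert np.allclose(summed / (num_tasks * train_frac), np.array([3.0]))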
class Model(object):
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm):
sess = tf.get_default_session()
train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps)
norm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
act_model = policy(sess, ob_space, ac_space, nbatch_act, 1)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
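        # PPO clipped surrogate: ratio = pi_new(a|s) / pi_old(a|s), recovered from
        # the stored negative log-probs. pg_loss = mean(max(-ADV*ratio, -ADV*clipped_ratio)),
        # i.e. the negative of the clipped objective min(ratio*ADV, clip(ratio)*ADV).
        # approxkl and clipfrac are logged as diagnostics only.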
params = tf.trainable_variables()
weight_params = [v for v in params if '/b' not in v.name]
total_num_params = 0
for p in params:
shape = p.get_shape().as_list()
num_params = np.prod(shape)
mpi_print('param', p, num_params)
total_num_params += num_params
mpi_print('total num params:', total_num_params)
l2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in weight_params])
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + l2_loss * Config.L2_WEIGHT
if Config.SYNC_FROM_ROOT:
trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
else:
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
grads_and_var = trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
_train = trainer.apply_gradients(grads_and_var)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
adv_mean = np.mean(advs, axis=0, keepdims=True)
adv_std = np.std(advs, axis=0, keepdims=True)
advs = (advs - adv_mean) / (adv_std + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, l2_loss, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'l2_loss']
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
if Config.SYNC_FROM_ROOT:
if MPI.COMM_WORLD.Get_rank() == 0:
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
sync_from_root(sess, global_variables) #pylint: disable=E1101
else:
initialize()
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
self.lam = lam
self.gamma = gamma
def run(self):
        # Here, we init the lists that will contain the minibatch of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
        # For each of the nsteps rollout steps
for _ in range(self.nsteps):
            # Given observations, get actions, values, and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in the env and look at the results
            # infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
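        # Generalized Advantage Estimation, computed backwards in time:
        #   delta_t = rewards[t] + gamma * values[t+1] * (1 - dones[t+1]) - values[t]
        #   advs[t] = delta_t + gamma * lam * (1 - dones[t+1]) * advs[t+1]
        # with the bootstrap `last_values` (and the final dones) standing in for index nsteps.
        # The value-function targets are then returns = advs + values.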
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mpi_size = comm.Get_size()
sess = tf.get_default_session()
tb_writer = TB_Writer(sess)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
utils.load_all_params(sess)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf10 = deque(maxlen=10)
epinfobuf100 = deque(maxlen=100)
tfirststart = time.time()
active_ep_buf = epinfobuf100
nupdates = total_timesteps//nbatch
mean_rewards = []
datapoints = []
run_t_total = 0
train_t_total = 0
can_save = True
checkpoints = [32, 64]
saved_key_checkpoints = [False] * len(checkpoints)
if Config.SYNC_FROM_ROOT and rank != 0:
can_save = False
def save_model(base_name=None):
base_dict = {'datapoints': datapoints}
utils.save_params_in_scopes(sess, ['model'], Config.get_save_file(base_name=base_name), base_dict)
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
mpi_print('collecting rollouts...')
run_tstart = time.time()
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run()
epinfobuf10.extend(epinfos)
epinfobuf100.extend(epinfos)
run_elapsed = time.time() - run_tstart
run_t_total += run_elapsed
mpi_print('rollouts complete')
mblossvals = []
mpi_print('updating parameters...')
train_tstart = time.time()
if states is None: # nonrecurrent version
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# update the dropout mask
sess.run([model.train_model.dropout_assign_ops])
train_elapsed = time.time() - train_tstart
train_t_total += train_elapsed
mpi_print('update complete')
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
step = update*nbatch
rew_mean_10 = utils.process_ep_buf(active_ep_buf, tb_writer=tb_writer, suffix='', step=step)
ep_len_mean = np.nanmean([epinfo['l'] for epinfo in active_ep_buf])
mpi_print('\n----', update)
mean_rewards.append(rew_mean_10)
datapoints.append([step, rew_mean_10])
tb_writer.log_scalar(ep_len_mean, 'ep_len_mean')
tb_writer.log_scalar(fps, 'fps')
mpi_print('time_elapsed', tnow - tfirststart, run_t_total, train_t_total)
mpi_print('timesteps', update*nsteps, total_timesteps)
mpi_print('eplenmean', ep_len_mean)
mpi_print('eprew', rew_mean_10)
mpi_print('fps', fps)
mpi_print('total_timesteps', update*nbatch)
mpi_print([epinfo['r'] for epinfo in epinfobuf10])
if len(mblossvals):
for (lossval, lossname) in zip(lossvals, model.loss_names):
mpi_print(lossname, lossval)
tb_writer.log_scalar(lossval, lossname)
mpi_print('----\n')
if can_save:
if save_interval and (update % save_interval == 0):
save_model()
for j, checkpoint in enumerate(checkpoints):
if (not saved_key_checkpoints[j]) and (step >= (checkpoint * 1e6)):
saved_key_checkpoints[j] = True
save_model(str(checkpoint) + 'M')
save_model()
env.close()
return mean_rewards
|
import tensorflow as tf
from mpi4py import MPI
from coinrun.config import Config
import numpy as np
def clean_tb_dir():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
if tf.gfile.Exists(Config.TB_DIR):
tf.gfile.DeleteRecursively(Config.TB_DIR)
tf.gfile.MakeDirs(Config.TB_DIR)
comm.Barrier()
class TB_Writer(object):
def __init__(self, sess):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
clean_tb_dir()
tb_writer = tf.summary.FileWriter(Config.TB_DIR + '/' + Config.RUN_ID + '_' + str(rank), sess.graph)
total_steps = [0]
should_log = (rank == 0 or Config.LOG_ALL_MPI)
if should_log:
hyperparams = np.array(Config.get_arg_text())
hyperparams_tensor = tf.constant(hyperparams)
summary_op = tf.summary.text("hyperparameters info", hyperparams_tensor)
summary = sess.run(summary_op)
tb_writer.add_summary(summary)
def add_summary(_merged, interval=1):
if should_log:
total_steps[0] += 1
if total_steps[0] % interval == 0:
tb_writer.add_summary(_merged, total_steps[0])
tb_writer.flush()
tuples = []
def make_scalar_graph(name):
scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32)
scalar_summary = tf.summary.scalar(name, scalar_ph)
merged = tf.summary.merge([scalar_summary])
tuples.append((scalar_ph, merged))
name_dict = {}
curr_name_idx = [0]
def log_scalar(x, name, step=-1):
if not name in name_dict:
name_dict[name] = curr_name_idx[0]
tf_name = (name + '_' + Config.RUN_ID) if curr_name_idx[0] == 0 else name
make_scalar_graph(tf_name)
curr_name_idx[0] += 1
idx = name_dict[name]
scalar_ph, merged = tuples[idx]
if should_log:
if step == -1:
step = total_steps[0]
total_steps[0] += 1
_merged = sess.run(merged, {scalar_ph: x})
tb_writer.add_summary(_merged, step)
tb_writer.flush()
self.add_summary = add_summary
self.log_scalar = log_scalar
|
from .coinrunenv import init_args_and_threads
from .coinrunenv import make
__all__ = [
'init_args_and_threads',
'make'
]
|
import gym
import numpy as np
class EpsilonGreedyWrapper(gym.Wrapper):
"""
Wrapper to perform a random action each step instead of the requested action,
with the provided probability.
"""
def __init__(self, env, prob=0.05):
gym.Wrapper.__init__(self, env)
self.prob = prob
self.num_envs = env.num_envs
def reset(self):
return self.env.reset()
def step(self, action):
if np.random.uniform()<self.prob:
action = np.random.randint(self.env.action_space.n, size=self.num_envs)
return self.env.step(action)
class EpisodeRewardWrapper(gym.Wrapper):
def __init__(self, env):
env.metadata = {'render.modes': []}
env.reward_range = (-float('inf'), float('inf'))
nenvs = env.num_envs
self.num_envs = nenvs
super(EpisodeRewardWrapper, self).__init__(env)
self.aux_rewards = None
self.num_aux_rews = None
def reset(**kwargs):
self.rewards = np.zeros(nenvs)
self.lengths = np.zeros(nenvs)
self.aux_rewards = None
self.long_aux_rewards = None
return self.env.reset(**kwargs)
def step(action):
obs, rew, done, infos = self.env.step(action)
if self.aux_rewards is None:
info = infos[0]
if 'aux_rew' in info:
self.num_aux_rews = len(infos[0]['aux_rew'])
else:
self.num_aux_rews = 0
self.aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.long_aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.rewards += rew
self.lengths += 1
use_aux = self.num_aux_rews > 0
if use_aux:
for i, info in enumerate(infos):
self.aux_rewards[i,:] += info['aux_rew']
self.long_aux_rewards[i,:] += info['aux_rew']
for i, d in enumerate(done):
if d:
epinfo = {'r': round(self.rewards[i], 6), 'l': self.lengths[i], 't': 0}
aux_dict = {}
for nr in range(self.num_aux_rews):
aux_dict['aux_' + str(nr)] = self.aux_rewards[i,nr]
if 'ale.lives' in infos[i]:
game_over_rew = np.nan
is_game_over = infos[i]['ale.lives'] == 0
if is_game_over:
game_over_rew = self.long_aux_rewards[i,0]
self.long_aux_rewards[i,:] = 0
aux_dict['game_over_rew'] = game_over_rew
epinfo['aux_dict'] = aux_dict
infos[i]['episode'] = epinfo
self.rewards[i] = 0
self.lengths[i] = 0
self.aux_rewards[i,:] = 0
return obs, rew, done, infos
self.reset = reset
self.step = step
def add_final_wrappers(env):
env = EpisodeRewardWrapper(env)
return env |
"""
Run a CoinRun environment in a window where you can interact with it using the keyboard
"""
from coinrun.coinrunenv import lib
from coinrun import setup_utils
def main():
setup_utils.setup_and_load(paint_vel_info=0)
print("""Control with arrow keys,
F1, F2 -- switch resolution,
F5, F6, F7, F8 -- zoom,
F9 -- switch reconstruction target picture,
F10 -- switch lasers
""")
lib.test_main_loop()
if __name__ == '__main__':
main() |
import tensorflow as tf
import os
import joblib
import numpy as np
from mpi4py import MPI
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from coinrun.config import Config
from coinrun import setup_utils, wrappers
import platform
def make_general_env(num_env, seed=0, use_sub_proc=True):
from coinrun import coinrunenv
env = coinrunenv.make(Config.GAME_TYPE, num_env)
if Config.FRAME_STACK > 1:
env = VecFrameStack(env, Config.FRAME_STACK)
epsilon = Config.EPSILON_GREEDY
if epsilon > 0:
env = wrappers.EpsilonGreedyWrapper(env, epsilon)
return env
def file_to_path(filename):
return setup_utils.file_to_path(filename)
def load_all_params(sess):
load_params_for_scope(sess, 'model')
def load_params_for_scope(sess, scope, load_key='default'):
load_data = Config.get_load_data(load_key)
if load_data is None:
return False
params_dict = load_data['params']
if scope in params_dict:
print('Loading saved file for scope', scope)
loaded_params = params_dict[scope]
loaded_params, params = get_savable_params(loaded_params, scope, keep_heads=True)
restore_params(sess, loaded_params, params)
return True
def get_savable_params(loaded_params, scope, keep_heads=False):
params = tf.trainable_variables(scope)
filtered_params = []
filtered_loaded = []
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
for p, loaded_p in zip(params, loaded_params):
keep = True
if any((scope + '/' + x) in p.name for x in ['v','pi']):
keep = keep_heads
if keep:
filtered_params.append(p)
filtered_loaded.append(loaded_p)
else:
print('drop', p)
return filtered_loaded, filtered_params
def restore_params(sess, loaded_params, params):
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
restores = []
for p, loaded_p in zip(params, loaded_params):
print('restoring', p)
restores.append(p.assign(loaded_p))
sess.run(restores)
def save_params_in_scopes(sess, scopes, filename, base_dict=None):
data_dict = {}
if base_dict is not None:
data_dict.update(base_dict)
save_path = file_to_path(filename)
data_dict['args'] = Config.get_args_dict()
param_dict = {}
for scope in scopes:
params = tf.trainable_variables(scope)
if len(params) > 0:
print('saving scope', scope, filename)
ps = sess.run(params)
param_dict[scope] = ps
data_dict['params'] = param_dict
joblib.dump(data_dict, save_path)
def setup_mpi_gpus():
if 'RCALL_NUM_GPU' not in os.environ:
return
num_gpus = int(os.environ['RCALL_NUM_GPU'])
node_id = platform.node()
nodes = MPI.COMM_WORLD.allgather(node_id)
local_rank = len([n for n in nodes[:MPI.COMM_WORLD.Get_rank()] if n == node_id])
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
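    # e.g. with RCALL_NUM_GPU=4 and 8 MPI processes on one node, local ranks
    # 0..7 are assigned CUDA_VISIBLE_DEVICES 0,1,2,3,0,1,2,3 respectively.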
def is_mpi_root():
return MPI.COMM_WORLD.Get_rank() == 0
def tf_initialize(sess):
sess.run(tf.initialize_all_variables())
sync_from_root(sess)
def sync_from_root(sess, vars=None):
if vars is None:
vars = tf.trainable_variables()
if Config.SYNC_FROM_ROOT:
rank = MPI.COMM_WORLD.Get_rank()
print('sync from root', rank)
for var in vars:
if rank == 0:
MPI.COMM_WORLD.bcast(sess.run(var))
else:
sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))
def mpi_average(values):
return mpi_average_comm(values, MPI.COMM_WORLD)
def mpi_average_comm(values, comm):
size = comm.size
x = np.array(values)
buf = np.zeros_like(x)
comm.Allreduce(x, buf, op=MPI.SUM)
buf = buf / size
return buf
def mpi_average_train_test(values):
return mpi_average_comm(values, Config.TRAIN_TEST_COMM)
def mpi_print(*args):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
print(*args)
def process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0):
rewards = [epinfo['r'] for epinfo in epinfobuf]
rew_mean = np.nanmean(rewards)
if Config.SYNC_FROM_ROOT:
rew_mean = mpi_average_train_test([rew_mean])[0]
if tb_writer is not None:
tb_writer.log_scalar(rew_mean, 'rew_mean' + suffix, step)
aux_dicts = []
if len(epinfobuf) > 0 and 'aux_dict' in epinfobuf[0]:
aux_dicts = [epinfo['aux_dict'] for epinfo in epinfobuf]
if len(aux_dicts) > 0:
keys = aux_dicts[0].keys()
for key in keys:
sub_rews = [aux_dict[key] for aux_dict in aux_dicts]
sub_rew = np.nanmean(sub_rews)
if tb_writer is not None:
tb_writer.log_scalar(sub_rew, key, step)
return rew_mean
|
from coinrun.config import Config
import os
import joblib
def load_for_setup_if_necessary():
restore_file(Config.RESTORE_ID)
def restore_file(restore_id, load_key='default'):
if restore_id is not None:
load_file = Config.get_load_filename(restore_id=restore_id)
filepath = file_to_path(load_file)
load_data = joblib.load(filepath)
Config.set_load_data(load_data, load_key=load_key)
restored_args = load_data['args']
sub_dict = {}
res_keys = Config.RES_KEYS
for key in res_keys:
if key in restored_args:
sub_dict[key] = restored_args[key]
else:
print('warning key %s not restored' % key)
Config.parse_args_dict(sub_dict)
from coinrun.coinrunenv import init_args_and_threads
init_args_and_threads(4)
def setup_and_load(use_cmd_line_args=True, **kwargs):
"""
Initialize the global config using command line options, defaulting to the values in `config.py`.
`use_cmd_line_args`: set to False to ignore command line arguments passed to the program
`**kwargs`: override the defaults from `config.py` with these values
"""
args = Config.initialize_args(use_cmd_line_args=use_cmd_line_args, **kwargs)
load_for_setup_if_necessary()
return args
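# Usage sketch (mirrors random_agent.py above; assumes the package is built):
# keyword arguments passed to setup_and_load override the config.py defaults,
# using the underscored long option names, e.g.
#   from coinrun import setup_utils, make
#   setup_utils.setup_and_load(use_cmd_line_args=False, num_levels=500, set_seed=-1)
#   env = make('standard', num_envs=4)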
def file_to_path(filename):
return os.path.join(Config.WORKDIR, filename) |
from coinrun import random_agent
def test_coinrun():
random_agent.random_agent(num_envs=16, max_steps=100)
if __name__ == '__main__':
test_coinrun() |
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_input
from coinrun.config import Config
def impala_cnn(images, depths=[16, 32, 32]):
"""
Model used in the paper "IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561
"""
use_batch_norm = Config.USE_BATCH_NORM == 1
dropout_layer_num = [0]
dropout_assign_ops = []
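    # The dropout mask below is stored in a non-trainable variable and is only
    # refreshed when the ops collected in dropout_assign_ops are run (ppo2.learn
    # runs them once per update), so the same broadcast mask is applied to every
    # example in a batch until the next refresh.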
def dropout_layer(out):
if Config.DROPOUT > 0:
out_shape = out.get_shape().as_list()
num_features = np.prod(out_shape[1:])
var_name = 'mask_' + str(dropout_layer_num[0])
batch_seed_shape = out_shape[1:]
batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)
batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))
dropout_assign_ops.append(batch_seed_assign)
curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - Config.DROPOUT))
curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT))
out = out * curr_mask
dropout_layer_num[0] += 1
return out
def conv_layer(out, depth):
out = tf.layers.conv2d(out, depth, 3, padding='same')
out = dropout_layer(out)
if use_batch_norm:
out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)
return out
def residual_block(inputs):
depth = inputs.get_shape()[-1].value
out = tf.nn.relu(inputs)
out = conv_layer(out, depth)
out = tf.nn.relu(out)
out = conv_layer(out, depth)
return out + inputs
def conv_sequence(inputs, depth):
out = conv_layer(inputs, depth)
out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
out = residual_block(out)
out = residual_block(out)
return out
out = images
for depth in depths:
out = conv_sequence(out, depth)
out = tf.layers.flatten(out)
out = tf.nn.relu(out)
out = tf.layers.dense(out, 256, activation=tf.nn.relu)
return out, dropout_assign_ops
def nature_cnn(scaled_images, **conv_kwargs):
"""
Model used in the paper "Human-level control through deep reinforcement learning"
https://www.nature.com/articles/nature14236
"""
def activ(curr):
return tf.nn.relu(curr)
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
def choose_cnn(images):
arch = Config.ARCHITECTURE
scaled_images = tf.cast(images, tf.float32) / 255.
dropout_assign_ops = []
if arch == 'nature':
out = nature_cnn(scaled_images)
elif arch == 'impala':
out, dropout_assign_ops = impala_cnn(scaled_images)
elif arch == 'impalalarge':
out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])
else:
assert(False)
return out, dropout_assign_ops
class LstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256):
nenv = nbatch // nsteps
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
vf = fc(h5, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h5)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask})
def value(ob, state, mask):
return sess.run(vf, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
vf = fc(h, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.vf = vf
self.step = step
self.value = value
def get_policy():
use_lstm = Config.USE_LSTM
if use_lstm == 1:
policy = LstmPolicy
elif use_lstm == 0:
policy = CnnPolicy
else:
assert(False)
return policy
|
"""
Python interface to the CoinRun shared library using ctypes.
On import, this will attempt to build the shared library.
"""
import os
import atexit
import random
import sys
from ctypes import c_int, c_char_p, c_float, c_bool
import gym
import gym.spaces
import numpy as np
import numpy.ctypeslib as npct
from baselines.common.vec_env import VecEnv
from baselines import logger
from coinrun.config import Config
from mpi4py import MPI
from baselines.common import mpi_util
# if the environment is crashing, try using the debug build to get
# a readable stack trace
DEBUG = False
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
game_versions = {
'standard': 1000,
'platform': 1001,
'maze': 1002,
}
def build():
lrank, _lsize = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
if lrank == 0:
dirname = os.path.dirname(__file__)
if len(dirname):
make_cmd = "QT_SELECT=5 make -C %s" % dirname
else:
make_cmd = "QT_SELECT=5 make"
r = os.system(make_cmd)
if r != 0:
logger.error('coinrun: make failed')
sys.exit(1)
MPI.COMM_WORLD.barrier()
build()
if DEBUG:
lib_path = '.build-debug/coinrun_cpp_d'
else:
lib_path = '.build-release/coinrun_cpp'
lib = npct.load_library(lib_path, os.path.dirname(__file__))
lib.init.argtypes = [c_int]
lib.get_NUM_ACTIONS.restype = c_int
lib.get_RES_W.restype = c_int
lib.get_RES_H.restype = c_int
lib.get_VIDEORES.restype = c_int
lib.vec_create.argtypes = [
c_int, # game_type
c_int, # nenvs
c_int, # lump_n
c_bool, # want_hires_render
c_float, # default_zoom
]
lib.vec_create.restype = c_int
lib.vec_close.argtypes = [c_int]
lib.vec_step_async_discrete.argtypes = [c_int, npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_args.argtypes = [npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_set_monitor_dir.argtypes = [c_char_p, c_int]
lib.vec_wait.argtypes = [
c_int,
npct.ndpointer(dtype=np.uint8, ndim=4), # normal rgb
npct.ndpointer(dtype=np.uint8, ndim=4), # larger rgb for render()
npct.ndpointer(dtype=np.float32, ndim=1), # rew
npct.ndpointer(dtype=np.bool, ndim=1), # done
]
already_inited = False
def init_args_and_threads(cpu_count=4,
monitor_csv_policy='all',
rand_seed=None):
"""
Perform one-time global init for the CoinRun library. This must be called
before creating an instance of CoinRunVecEnv. You should not
call this multiple times from the same process.
"""
os.environ['COINRUN_RESOURCES_PATH'] = os.path.join(SCRIPT_DIR, 'assets')
is_high_difficulty = Config.HIGH_DIFFICULTY
if rand_seed is None:
rand_seed = random.SystemRandom().randint(0, 1000000000)
# ensure different MPI processes get different seeds (just in case SystemRandom implementation is poor)
mpi_rank, mpi_size = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
rand_seed = rand_seed - rand_seed % mpi_size + mpi_rank
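    # e.g. with 8 processes per node, each process's seed is floored to a
    # multiple of 8 and offset by its local rank, so co-located processes get
    # distinct seeds even if SystemRandom returned the same value.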
int_args = np.array([int(is_high_difficulty), Config.NUM_LEVELS, int(Config.PAINT_VEL_INFO), Config.USE_DATA_AUGMENTATION, game_versions[Config.GAME_TYPE], Config.SET_SEED, rand_seed]).astype(np.int32)
lib.initialize_args(int_args)
lib.initialize_set_monitor_dir(logger.get_dir().encode('utf-8'), {'off': 0, 'first_env': 1, 'all': 2}[monitor_csv_policy])
global already_inited
if already_inited:
return
lib.init(cpu_count)
already_inited = True
@atexit.register
def shutdown():
global already_inited
if not already_inited:
return
lib.coinrun_shutdown()
class CoinRunVecEnv(VecEnv):
"""
    This is the CoinRun VecEnv; all CoinRun environments are just instances
    of this class with different values for `game_type`.
    `game_type`: int game type to create; see `enum GameType` in `coinrun.cpp`
`num_envs`: number of environments to create in this VecEnv
`lump_n`: only used when the environment creates `monitor.csv` files
`default_zoom`: controls how much of the level the agent can see
"""
def __init__(self, game_type, num_envs, lump_n=0, default_zoom=5.0):
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
self.NUM_ACTIONS = lib.get_NUM_ACTIONS()
self.RES_W = lib.get_RES_W()
self.RES_H = lib.get_RES_H()
self.VIDEORES = lib.get_VIDEORES()
self.buf_rew = np.zeros([num_envs], dtype=np.float32)
self.buf_done = np.zeros([num_envs], dtype=np.bool)
self.buf_rgb = np.zeros([num_envs, self.RES_H, self.RES_W, 3], dtype=np.uint8)
self.hires_render = Config.IS_HIGH_RES
if self.hires_render:
self.buf_render_rgb = np.zeros([num_envs, self.VIDEORES, self.VIDEORES, 3], dtype=np.uint8)
else:
self.buf_render_rgb = np.zeros([1, 1, 1, 1], dtype=np.uint8)
num_channels = 1 if Config.USE_BLACK_WHITE else 3
obs_space = gym.spaces.Box(0, 255, shape=[self.RES_H, self.RES_W, num_channels], dtype=np.uint8)
super().__init__(
num_envs=num_envs,
observation_space=obs_space,
action_space=gym.spaces.Discrete(self.NUM_ACTIONS),
)
self.handle = lib.vec_create(
game_versions[game_type],
self.num_envs,
lump_n,
self.hires_render,
default_zoom)
self.dummy_info = [{} for _ in range(num_envs)]
def __del__(self):
if hasattr(self, 'handle'):
lib.vec_close(self.handle)
self.handle = 0
def close(self):
lib.vec_close(self.handle)
self.handle = 0
def reset(self):
print("CoinRun ignores resets")
obs, _, _, _ = self.step_wait()
return obs
def get_images(self):
if self.hires_render:
return self.buf_render_rgb
else:
return self.buf_rgb
def step_async(self, actions):
assert actions.dtype in [np.int32, np.int64]
actions = actions.astype(np.int32)
lib.vec_step_async_discrete(self.handle, actions)
def step_wait(self):
self.buf_rew = np.zeros_like(self.buf_rew)
self.buf_done = np.zeros_like(self.buf_done)
lib.vec_wait(
self.handle,
self.buf_rgb,
self.buf_render_rgb,
self.buf_rew,
self.buf_done)
obs_frames = self.buf_rgb
if Config.USE_BLACK_WHITE:
obs_frames = np.mean(obs_frames, axis=-1).astype(np.uint8)[...,None]
return obs_frames, self.buf_rew, self.buf_done, self.dummy_info
def make(env_id, num_envs, **kwargs):
assert env_id in game_versions, 'cannot find environment "%s", maybe you mean one of %s' % (env_id, list(game_versions.keys()))
return CoinRunVecEnv(env_id, num_envs, **kwargs)
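# Usage sketch (mirrors random_agent.py above; assumes setup_and_load has been
# called): `make` returns a CoinRunVecEnv whose step() consumes one int action
# per environment, e.g.
#   env = make('standard', num_envs=2)
#   obs = env.reset()                                        # (2, RES_H, RES_W, 3) uint8
#   obs, rew, done, info = env.step(np.zeros(2, dtype=np.int32))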
|
import json
import pickle
import math
import sys
import argparse
import warnings
from os import makedirs
from os.path import basename, join, exists, dirname, splitext, realpath
from wikidata_linker_utils.progressbar import get_progress_bar
from dataset import TSVDataset, CombinedDataset, H5Dataset, ClassificationHandler
from batchifier import (iter_batches_single_threaded,
requires_vocab,
requires_character_convolution,
get_feature_vocabs)
import tensorflow as tf
import numpy as np
try:
RNNCell = tf.nn.rnn_cell.RNNCell
TFLSTMCell = tf.nn.rnn_cell.LSTMCell
MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
from tensorflow.contrib.cudnn_rnn import CudnnLSTM
except AttributeError:
RNNCell = tf.contrib.rnn.RNNCell
TFLSTMCell = tf.contrib.rnn.LSTMCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM
from tensorflow.python.client import device_lib
class LazyAdamOptimizer(tf.train.AdamOptimizer):
"""Variant of the Adam optimizer that handles sparse updates more efficiently.
The original Adam algorithm maintains two moving-average accumulators for
each trainable variable; the accumulators are updated at every step.
This class provides lazier handling of gradient updates for sparse variables.
It only updates moving-average accumulators for sparse variable indices that
appear in the current batch, rather than updating the accumulators for all
indices. Compared with the original Adam optimizer, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original Adam algorithm, and
may lead to different empirical results.
"""
def _apply_sparse(self, grad, var):
beta1_power = tf.cast(self._beta1_power, var.dtype.base_dtype)
beta2_power = tf.cast(self._beta2_power, var.dtype.base_dtype)
lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
# m := beta1 * m + (1 - beta1) * g_t
# We use a slightly different version of the moving-average update formula
# that does a better job of handling concurrent lockless updates:
# m -= (1 - beta1) * (m - g_t)
m = self.get_slot(var, "m")
m_t_delta = tf.gather(m, grad.indices) - grad.values
m_t = tf.scatter_sub(m, grad.indices,
(1 - beta1_t) * m_t_delta,
use_locking=self._use_locking)
# v := beta2 * v + (1 - beta2) * (g_t * g_t)
# We reformulate the update as:
# v -= (1 - beta2) * (v - g_t * g_t)
v = self.get_slot(var, "v")
v_t_delta = tf.gather(v, grad.indices) - tf.square(grad.values)
v_t = tf.scatter_sub(v, grad.indices,
(1 - beta2_t) * v_t_delta,
use_locking=self._use_locking)
# variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))
m_t_slice = tf.gather(m_t, grad.indices)
v_t_slice = tf.gather(v_t, grad.indices)
denominator_slice = tf.sqrt(v_t_slice) + epsilon_t
var_update = tf.scatter_sub(var, grad.indices,
lr * m_t_slice / denominator_slice,
use_locking=self._use_locking)
return tf.group(var_update, m_t, v_t)
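# Hedged usage note: LazyAdamOptimizer is a drop-in replacement for
# tf.train.AdamOptimizer whose benefit only appears when gradients arrive as
# tf.IndexedSlices (e.g. from tf.nn.embedding_lookup); dense gradients fall
# through to the parent class unchanged, e.g.
#   train_op = LazyAdamOptimizer(learning_rate=1e-3).minimize(loss)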
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def split(values, axis, num_splits, name=None):
return tf.split(values, num_splits, axis=axis, name=name)
def reverse(values, axis):
return tf.reverse(values, [axis])
def sparse_softmax_cross_entropy_with_logits(logits, labels):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
def concat(values, axis, name=None):
if len(values) == 1:
return values[0]
return tf.concat(values, axis, name=name)
def concat_tensor_array(values, name=None):
return values.stack(name=name)
def batch_gather_3d(values, indices):
return tf.gather(tf.reshape(values, [-1, tf.shape(values)[2]]),
tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +
indices)
def batch_gather_2d(values, indices):
return tf.gather(tf.reshape(values, [-1]),
tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +
indices)
def viterbi_decode(score, transition_params, sequence_lengths, back_prop=False,
parallel_iterations=1):
"""Decode the highest scoring sequence of tags inside of TensorFlow!!!
This can be used anytime.
Args:
score: A [batch, seq_len, num_tags] matrix of unary potentials.
transition_params: A [num_tags, num_tags] matrix of binary potentials.
sequence_lengths: A [batch] int32 vector of the length of each score
sequence.
Returns:
viterbi: A [batch, seq_len] list of integers containing the highest
scoring tag indices.
viterbi_score: A vector of float containing the score for the Viterbi
sequence.
"""
sequence_lengths = tf.convert_to_tensor(
sequence_lengths, name="sequence_lengths")
score = tf.convert_to_tensor(score, name="score")
transition_params = tf.convert_to_tensor(
transition_params, name="transition_params")
if sequence_lengths.dtype != tf.int32:
sequence_lengths = tf.cast(sequence_lengths, tf.int32)
def condition(t, *args):
"""Stop when full score sequence has been read in."""
return tf.less(t, tf.shape(score)[1])
def body(t, trellis, backpointers, trellis_val):
"""Perform forward viterbi pass."""
v = tf.expand_dims(trellis_val, 2) + tf.expand_dims(transition_params, 0)
new_trellis_val = score[:, t, :] + tf.reduce_max(v, axis=1)
new_trellis = trellis.write(t, new_trellis_val)
new_backpointers = backpointers.write(
t, tf.cast(tf.argmax(v, axis=1), tf.int32))
return t + 1, new_trellis, new_backpointers, new_trellis_val
trellis_arr = tf.TensorArray(score.dtype, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
first_trellis_val = score[:, 0, :]
trellis_arr = trellis_arr.write(0, first_trellis_val)
backpointers_arr = tf.TensorArray(tf.int32, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
backpointers_arr = backpointers_arr.write(0,
tf.zeros_like(score[:, 0, :], dtype=tf.int32))
_, trellis_out, backpointers_out, _ = tf.while_loop(
condition, body,
(tf.constant(1, name="t", dtype=tf.int32), trellis_arr, backpointers_arr, first_trellis_val),
parallel_iterations=parallel_iterations,
back_prop=back_prop)
trellis_out = concat_tensor_array(trellis_out)
backpointers_out = concat_tensor_array(backpointers_out)
# make batch-major:
trellis_out = tf.transpose(trellis_out, [1, 0, 2])
backpointers_out = tf.transpose(backpointers_out, [1, 0, 2])
def condition(t, *args):
return tf.less(t, tf.shape(score)[1])
def body(t, viterbi, last_decision):
backpointers_timestep = batch_gather_3d(
backpointers_out, tf.maximum(sequence_lengths - t, 0))
new_last_decision = batch_gather_2d(
backpointers_timestep, last_decision)
new_viterbi = viterbi.write(t, new_last_decision)
return t + 1, new_viterbi, new_last_decision
last_timestep = batch_gather_3d(trellis_out, sequence_lengths - 1)
# get scores for last timestep of each batch element inside
# trellis:
scores = tf.reduce_max(last_timestep, axis=1)
# get choice index for last timestep:
last_decision = tf.cast(tf.argmax(last_timestep, axis=1), tf.int32)
# decode backwards using backpointers:
viterbi = tf.TensorArray(tf.int32, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
viterbi = viterbi.write(0, last_decision)
_, viterbi_out, _ = tf.while_loop(
condition, body,
(tf.constant(1, name="t", dtype=tf.int32), viterbi, last_decision),
parallel_iterations=parallel_iterations,
back_prop=back_prop)
viterbi_out = concat_tensor_array(viterbi_out)
# make batch-major:
viterbi_out = tf.transpose(viterbi_out, [1, 0])
viterbi_out_fwd = tf.reverse_sequence(
viterbi_out, sequence_lengths, seq_dim=1)
return viterbi_out_fwd, scores
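def _viterbi_decode_demo():
    """Hedged sketch (not part of the original script): decoding a toy batch
    with the graph-mode viterbi_decode above, using the TF1 session API that
    the rest of this file assumes."""
    score = np.random.randn(2, 5, 3).astype(np.float32)          # [batch, seq_len, num_tags]
    transition_params = np.random.randn(3, 3).astype(np.float32)  # [num_tags, num_tags]
    sequence_lengths = np.array([5, 3], dtype=np.int32)
    tags, tag_scores = viterbi_decode(score, transition_params, sequence_lengths)
    with tf.Session() as sess:
        decoded, best_scores = sess.run([tags, tag_scores])
        print(decoded.shape, best_scores.shape)                   # (2, 5) (2,)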
def sum_list(elements):
total = elements[0]
for el in elements[1:]:
total += el
return total
def explicitly_set_fields():
received = set()
for argument in sys.argv:
if argument.startswith("--"):
received.add(argument[2:])
if argument[2:].startswith("no"):
received.add(argument[4:])
return received
def save_session(session, saver, path, verbose=False):
"""
Call save on tf.train.Saver on a specific path to store all the variables
of the current tensorflow session to a file for later restoring.
    Arguments:
        session : tf.Session
        saver : tf.train.Saver
        path : str, place to save session
        verbose : bool, if True print the save location
"""
makedirs(path, exist_ok=True)
if not path.endswith("/"):
path = path + "/"
path = join(path, "model.ckpt")
if verbose:
print("Saving session under %r" % (path,), flush=True)
saver.save(session, path)
print("Saved", flush=True)
### constants for saving & loading
# model config:
OBJECTIVE_NAMES = "OBJECTIVE_NAMES"
OBJECTIVE_TYPES = "OBJECTIVE_TYPES"
# inputs:
INPUT_PLACEHOLDERS = "INPUT_PLACEHOLDERS"
LABEL_PLACEHOLDERS = "LABEL_PLACEHOLDERS"
LABEL_MASK_PLACEHOLDERS = "LABEL_MASK_PLACEHOLDERS"
TRAIN_OP = "TRAIN_OP"
SEQUENCE_LENGTHS = "SEQUENCE_LENGTHS"
IS_TRAINING = "IS_TRAINING"
# outputs:
DECODED = "DECODED"
DECODED_SCORES = "DECODED_SCORES"
UNARY_SCORES = "UNARY_SCORES"
# per objective metrics:
TOKEN_CORRECT = "TOKEN_CORRECT"
TOKEN_CORRECT_TOTAL = "TOKEN_CORRECT_TOTAL"
SENTENCE_CORRECT = "SENTENCE_CORRECT"
SENTENCE_CORRECT_TOTAL = "SENTENCE_CORRECT_TOTAL"
# aggregate metrics over all objectives
NLL = "NLL"
NLL_TOTAL = "NLL_TOTAL"
TOKEN_CORRECT_ALL = "TOKEN_CORRECT_ALL"
TOKEN_CORRECT_ALL_TOTAL = "TOKEN_CORRECT_ALL_TOTAL"
SENTENCE_CORRECT_ALL = "SENTENCE_CORRECT_ALL"
SENTENCE_CORRECT_ALL_TOTAL = "SENTENCE_CORRECT_ALL_TOTAL"
CONFUSION_MATRIX = "CONFUSION_MATRIX"
GLOBAL_STEP = "global_step"
SUMMARIES_ASSIGNS = "SUMMARIES_ASSIGNS"
SUMMARIES_PLACEHOLDERS = "SUMMARIES_PLACEHOLDERS"
SUMMARIES_NAMES = "SUMMARIES_NAMES"
TRAIN_SUMMARIES = "TRAIN_SUMMARIES"
TRUE_POSITIVES = "TRUE_POSITIVES"
FALSE_POSITIVES = "FALSE_POSITIVES"
FALSE_NEGATIVES = "FALSE_NEGATIVES"
def maybe_dropout(inputs, keep_prob, is_training):
return tf.cond(is_training,
lambda : tf.nn.dropout(inputs, keep_prob),
lambda : inputs
) if keep_prob < 1 else inputs
def compute_sentence_correct(correct, sequence_mask):
any_label = tf.reduce_max(tf.cast(sequence_mask, tf.int32), 1)
sentence_correct_total = tf.reduce_sum(any_label)
# is 1 when all is correct, 0 otherwise
sentence_correct = tf.reduce_sum(tf.reduce_prod(
tf.cast(
tf.logical_or(correct, tf.logical_not(sequence_mask)),
tf.int32
),
1
) * any_label)
return sentence_correct, sentence_correct_total
def lstm_activation(inputs, input_h, input_c, W, b, activation):
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = concat([inputs, input_h], axis=1)
lstm_matrix = tf.nn.xw_plus_b(cell_inputs, W, b)
preactiv = split(lstm_matrix, axis=1, num_splits=4)
# from CUDNN docs:
# Values 0 and 4 reference the input gate.
# Values 1 and 5 reference the forget gate.
# Values 2 and 6 reference the new memory gate.
# Values 3 and 7 reference the output gate
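    # (CUDNN_MAPPING is defined elsewhere in this file; given the ordering
    # above it is presumably {"i": 0, "f": 1, "j": 2, "o": 3}.)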
i, f, j, o = (
preactiv[CUDNN_MAPPING["i"]],
preactiv[CUDNN_MAPPING["f"]],
preactiv[CUDNN_MAPPING["j"]],
preactiv[CUDNN_MAPPING["o"]]
)
c = (tf.nn.sigmoid(f) * input_c +
tf.nn.sigmoid(i) * activation(j))
m = tf.nn.sigmoid(o) * activation(c)
return (c, m)
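# In equation form, with [i, f, j, o] obtained by splitting
# W [x_t ; h_{t-1}] + b into four equal chunks (CUDNN ordering above):
#
#   c_t = sigmoid(f + forget_bias) * c_{t-1} + sigmoid(i) * activation(j)
#   h_t = sigmoid(o) * activation(c_t)
#
# where activation is typically tanh, and the optional dropout is applied to
# the candidate term activation(j) at training time only.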
class Logger(object):
def __init__(self, session, writer):
self.session = session
self.writer = writer
self._placeholders = {}
summaries = tf.get_collection(SUMMARIES_ASSIGNS)
summaries_pholders = tf.get_collection(SUMMARIES_PLACEHOLDERS)
summaries_names = [name.decode("utf-8")
for name in tf.get_collection(SUMMARIES_NAMES)]
for summary, pholder, name in zip(summaries, summaries_pholders, summaries_names):
self._placeholders[name] = (pholder, summary)
def log(self, name, value, step):
if name not in self._placeholders:
pholder = tf.placeholder(tf.float32, [], name=name)
summary = tf.summary.scalar(name, pholder)
tf.add_to_collection(SUMMARIES_ASSIGNS, summary)
tf.add_to_collection(SUMMARIES_NAMES, name)
tf.add_to_collection(SUMMARIES_PLACEHOLDERS, pholder)
self._placeholders[name] = (pholder, summary)
pholder, summary = self._placeholders[name]
res = self.session.run(summary, {pholder:value})
self.writer.add_summary(res, step)
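# Illustrative usage of Logger (the log directory is hypothetical):
#
#   writer = tf.summary.FileWriter("/tmp/sequence_tagger_logs")
#   logger = Logger(session, writer)
#   logger.log("dev_token_correct", 0.91, step=1000)
#
# Scalars logged under a new name lazily create their placeholder/summary
# pair and register them in the collections above, so previously created
# summaries can be recovered when the graph is restored.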
class ParametrizedLSTMCell(RNNCell):
def __init__(self, weights, biases, hidden_size):
self._weights = weights
self._biases = biases
self.hidden_size = hidden_size
@property
def state_size(self):
return (self.hidden_size, self.hidden_size)
@property
def output_size(self):
return self.hidden_size
def __call__(self, inputs, state, scope=None):
input_h, input_c = state
c, m = lstm_activation(inputs,
input_h=input_h,
input_c=input_c,
b=self._biases,
W=self._weights,
activation=tf.nn.tanh)
return m, (m, c)
class LSTMCell(TFLSTMCell):
def __init__(self,
num_units,
keep_prob=1.0,
is_training=False):
self._is_training = is_training
self._keep_prob = keep_prob
TFLSTMCell.__init__(
self,
num_units=num_units,
state_is_tuple=True
)
def __call__(self, inputs, state, scope=None):
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with tf.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [input_size.value + self._num_units, 4 * self._num_units],
dtype, 1)
b = tf.get_variable(
"B", shape=[4 * self._num_units],
initializer=tf.zeros_initializer(), dtype=dtype)
c, m = lstm_activation(inputs,
input_c=c_prev,
input_h=m_prev,
W=concat_w,
b=b,
activation=self._activation,
keep_prob=self._keep_prob,
is_training=self._is_training,
forget_bias=self._forget_bias)
return m, LSTMStateTuple(c, m)
def cudnn_lstm_parameter_size(input_size, hidden_size):
"""Number of parameters in a single CuDNN LSTM cell."""
biases = 8 * hidden_size
weights = 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)
return biases + weights
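# Worked example: input_size=100, hidden_size=200
#   biases  = 8 * 200                           =   1,600
#   weights = 4 * (200 * 100) + 4 * (200 * 200) = 240,000
#   total   = 241,600 parameters for a single cell/direction.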
def direction_to_num_directions(direction):
if direction == "unidirectional":
return 1
elif direction == "bidirectional":
return 2
else:
raise ValueError("Unknown direction: %r." % (direction,))
def estimate_cudnn_parameter_size(num_layers,
input_size,
hidden_size,
input_mode,
direction):
"""
Compute the number of parameters needed to
construct a stack of LSTMs. Assumes the hidden states
of bidirectional LSTMs are concatenated before being
sent to the next layer up.
"""
num_directions = direction_to_num_directions(direction)
params = 0
isize = input_size
for layer in range(num_layers):
for direction in range(num_directions):
params += cudnn_lstm_parameter_size(
isize, hidden_size
)
isize = hidden_size * num_directions
return params
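# Worked example: num_layers=2, input_size=100, hidden_size=200,
# direction="bidirectional":
#   layer 0: 2 directions * 241,600                  =   483,200
#   layer 1 sees 200 * 2 = 400 inputs (fwd/bwd concatenated), so
#   layer 1: 2 * (8*200 + 4*(200*400) + 4*(200*200)) =   963,200
#   total                                            = 1,446,400 parameters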
# cudnn conversion to dynamic RNN:
CUDNN_LAYER_WEIGHT_ORDER = [
"x", "x", "x", "x", "h", "h", "h", "h"
]
CUDNN_LAYER_BIAS_ORDER = [
"bx", "bx", "bx", "bx", "bh", "bh", "bh", "bh"
]
CUDNN_TRANSPOSED = True
CUDNN_MAPPING = {"i": 0, "f": 1, "j": 2, "o": 3}
def consume_biases_direction(params, old_offset, hidden_size, isize):
offset = old_offset
layer_biases_x = []
layer_biases_h = []
for piece in CUDNN_LAYER_BIAS_ORDER:
if piece == "bx":
layer_biases_x.append(
params[offset:offset + hidden_size]
)
offset += hidden_size
elif piece == "bh":
layer_biases_h.append(
params[offset:offset + hidden_size]
)
offset += hidden_size
else:
raise ValueError("Unknown cudnn piece %r." % (piece,))
b = concat(layer_biases_x, axis=0) + concat(layer_biases_h, axis=0)
return b, offset
def consume_weights_direction(params, old_offset, hidden_size, isize):
offset = old_offset
layer_weights_x = []
layer_weights_h = []
for piece in CUDNN_LAYER_WEIGHT_ORDER:
if piece == "x":
layer_weights_x.append(
tf.reshape(
params[offset:offset + hidden_size * isize],
[hidden_size, isize] if CUDNN_TRANSPOSED else [isize, hidden_size]
)
)
offset += hidden_size * isize
elif piece == "h":
layer_weights_h.append(
tf.reshape(
params[offset:offset + hidden_size * hidden_size],
[hidden_size, hidden_size]
)
)
offset += hidden_size * hidden_size
else:
raise ValueError("Unknown cudnn piece %r." % (piece,))
if CUDNN_TRANSPOSED:
W_T = concat([concat(layer_weights_x, axis=0), concat(layer_weights_h, axis=0)], axis=1)
W = tf.transpose(W_T)
else:
W = concat([concat(layer_weights_x, axis=1), concat(layer_weights_h, axis=1)], axis=0)
return W, offset
def decompose_layer_params(params, num_layers,
hidden_size, cell_input_size,
input_mode, direction, create_fn):
"""
This operation converts the opaque cudnn params into a set of
usable weight matrices.
Args:
params : Tensor, opaque cudnn params tensor
num_layers : int, number of stacked LSTMs.
hidden_size : int, number of neurons in each LSTM.
cell_input_size : int, input size for the LSTMs.
input_mode: whether a pre-projection was used or not. Currently only
'linear_input' is supported (e.g. CuDNN does its own projection
internally)
direction : str, 'unidirectional' or 'bidirectional'.
create_fn: callback for weight creation. Receives parameter slice (op),
layer (int), direction (0 = fwd, 1 = bwd),
parameter_index (0 = W, 1 = b).
Returns:
weights : list of lists of Tensors in the format:
first list is indexed layers,
inner list is indexed by direction (fwd, bwd),
tensors in the inner list are (Weights, biases)
"""
if input_mode != "linear_input":
raise ValueError("Only input_mode == linear_input supported for now.")
num_directions = direction_to_num_directions(direction)
offset = 0
all_weights = [[[] for j in range(num_directions)]
for i in range(num_layers)]
isize = cell_input_size
with tf.variable_scope("DecomposeCudnnParams"):
for layer in range(num_layers):
with tf.variable_scope("Layer{}".format(layer)):
for direction in range(num_directions):
with tf.variable_scope("fwd" if direction == 0 else "bwd"):
with tf.variable_scope("weights"):
W, offset = consume_weights_direction(
params,
old_offset=offset,
hidden_size=hidden_size,
isize=isize)
all_weights[layer][direction].append(
create_fn(W, layer, direction, 0))
isize = hidden_size * num_directions
isize = cell_input_size
for layer in range(num_layers):
with tf.variable_scope("Layer{}".format(layer)):
for direction in range(num_directions):
with tf.variable_scope("fwd" if direction == 0 else "bwd"):
with tf.variable_scope("biases"):
b, offset = consume_biases_direction(
params,
old_offset=offset,
hidden_size=hidden_size,
isize=isize)
all_weights[layer][direction].append(
create_fn(b, layer, direction, 1))
isize = hidden_size * num_directions
return all_weights
def create_decomposed_variable(param, lidx, didx, pidx):
with tf.device("cpu"):
return tf.get_variable("w" if pidx == 0 else "b",
shape=param.get_shape().as_list(),
dtype=param.dtype,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
"excluded_variables"])
def cpu_cudnn_params(params, num_layers, hidden_size, cell_input_size, input_mode,
direction):
"""
This operation converts the opaque cudnn params into a set of
usable weight matrices, and caches the conversion.
Args:
params : Tensor, opaque cudnn params tensor
num_layers : int, number of stacked LSTMs.
hidden_size : int, number of neurons in each LSTM.
cell_input_size : int, input size for the LSTMs.
input_mode: whether a pre-projection was used or not. Currently only
'linear_input' is supported (e.g. CuDNN does its own projection
internally)
direction : str, 'unidirectional' or 'bidirectional'.
Returns:
weights : list of lists of Tensors in the format:
first list is indexed layers,
inner list is indexed by direction (fwd, bwd),
tensors in the inner list are (Weights, biases)
"""
# create a boolean status variable that checks whether the
# weights have been converted to cpu format:
with tf.device("cpu"):
cpu_conversion_status = tf.get_variable(
name="CudnnConversionStatus", dtype=tf.float32,
initializer=tf.zeros_initializer(), shape=[],
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
# create a fresh copy of the weights (not trainable)
reshaped = decompose_layer_params(
params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode=input_mode,
direction=direction,
create_fn=create_decomposed_variable)
def cpu_convert():
all_assigns = decompose_layer_params(
params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode=input_mode,
direction=direction,
create_fn=lambda p, lidx, didx, pidx: tf.assign(reshaped[lidx][didx][pidx], p))
all_assigns = [assign for layer_assign in all_assigns
for dir_assign in layer_assign
for assign in dir_assign]
all_assigns.append(tf.assign(cpu_conversion_status, tf.constant(1.0, dtype=tf.float32)))
all_assigns.append(tf.Print(cpu_conversion_status, [0],
message="Converted cudnn weights to CPU format. "))
with tf.control_dependencies(all_assigns):
ret = tf.identity(cpu_conversion_status)
return ret
# cache the reshaping/concatenating
ensure_conversion = tf.cond(tf.greater(cpu_conversion_status, 0),
lambda: cpu_conversion_status,
cpu_convert)
# if weights are already reshaped, go ahead:
with tf.control_dependencies([ensure_conversion]):
# wrap with identity to ensure there is a dependency between assignment
# and using the weights:
all_params = [[[tf.identity(p) for p in dir_param]
for dir_param in layer_param]
for layer_param in reshaped]
return all_params
class CpuCudnnLSTM(object):
def __init__(self, num_layers, hidden_size,
cell_input_size, input_mode, direction):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.cell_input_size = cell_input_size
self.input_mode = input_mode
self.direction = direction
def __call__(self,
inputs,
input_h,
input_c,
params,
is_training=True):
layer_params = cpu_cudnn_params(params,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
cell_input_size=self.cell_input_size,
input_mode=self.input_mode,
direction=self.direction)
REVERSED = 1
layer_inputs = inputs
cell_idx = 0
for layer_param in layer_params:
hidden_fwd_bwd = []
final_output_c = []
final_output_h = []
for direction, (W, b) in enumerate(layer_param):
if direction == REVERSED:
layer_inputs = reverse(layer_inputs, axis=0)
hiddens, (output_h, output_c) = tf.nn.dynamic_rnn(
cell=ParametrizedLSTMCell(W, b, self.hidden_size),
inputs=layer_inputs,
dtype=inputs.dtype,
time_major=True,
initial_state=(input_h[cell_idx], input_c[cell_idx]))
if direction == REVERSED:
hiddens = reverse(hiddens, axis=0)
hidden_fwd_bwd.append(hiddens)
final_output_c.append(tf.expand_dims(output_c, 0))
final_output_h.append(tf.expand_dims(output_h, 0))
cell_idx += 1
if len(hidden_fwd_bwd) > 1:
layer_inputs = concat(hidden_fwd_bwd, axis=2)
final_output_c = concat(final_output_c, axis=0)
final_output_h = concat(final_output_h, axis=0)
else:
layer_inputs = hidden_fwd_bwd[0]
final_output_c = final_output_c[0]
final_output_h = final_output_h[0]
return layer_inputs, final_output_h, final_output_c
def highway(x, activation_fn=tf.nn.relu, scope=None):
size = x.get_shape()[-1].value
with tf.variable_scope(scope or "HighwayLayer"):
activ = tf.contrib.layers.fully_connected(
x, size * 2, activation_fn=None, scope="FC"
)
transform = tf.sigmoid(activ[..., :size], name="transform_gate")
hidden = activation_fn(activ[..., size:])
carry = 1.0 - transform
return tf.add(hidden * transform, x * carry, "y")
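# The block above is the standard highway layer (Srivastava et al., 2015):
#
#   t = sigmoid(W_T x + b_T)                       # transform gate
#   y = t * activation(W_H x + b_H) + (1 - t) * x
#
# Both projections come out of the single fully_connected call (output size
# 2 * size), which is then split into the gate half and the hidden half.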
def conv2d(inputs, output_dim, k_h, k_w,
stddev=0.02, scope=None,
weight_noise=0.0, is_training=True):
with tf.variable_scope(scope or "Conv2D"):
w = tf.get_variable('w', [k_h, k_w, inputs.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
if weight_noise > 0 and not isinstance(is_training, bool):
w = add_weight_noise(w, is_training=is_training, stddev=weight_noise)
return tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding="VALID")
def character_convolution(inputs, feature):
inputs_2d = tf.reshape(inputs,
[tf.shape(inputs)[0] * tf.shape(inputs)[1], tf.shape(inputs)[2]]
)
inputs_3d = embedding_lookup(
inputs_2d,
dim=feature["dimension"],
# 255 different bytes (uint8)
# & start and end symbol:
size=257,
dtype=tf.float32,
mask_negative=True)
inputs_4d = tf.expand_dims(inputs_3d, 1)
feature_pools = []
for idx, conv_filter in enumerate(feature["filters"]):
width, channels = conv_filter["width"], conv_filter["channels"]
        # conv2d output: [batch * time, 1, word_length - width + 1, channels];
        # the squeeze below removes the singleton height dimension.
conv = tf.squeeze(conv2d(inputs_4d, channels, 1, width, scope="CharacterConvolution%d" % (idx,)), [1])
# remove word dimension
pool = tf.reduce_max(conv, 1)
feature_pools.append(pool)
activations = concat(feature_pools, axis=1)
channels_out = sum(conv_filter["channels"] for conv_filter in feature["filters"])
activations = tf.reshape(
tf.tanh(activations),
[tf.shape(inputs)[0], tf.shape(inputs)[1], channels_out],
name="CharacterConvolutionPooled")
for idx in range(feature["highway_layers"]):
activations = highway(activations, scope="HighwayLayer%d" % (idx,),
activation_fn=tf.tanh)
return activations
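# Illustrative feature spec consumed by character_convolution (the numbers
# are made up; the "type" key checked by requires_character_convolution is
# defined elsewhere in this module and omitted here):
#
#   feature = {
#       "dimension": 15,                      # byte embedding size
#       "filters": [{"width": 3, "channels": 30},
#                   {"width": 4, "channels": 40}],
#       "highway_layers": 2,
#   }
#
# which yields a 70-dimensional (30 + 40) per-token vector after max-pooling
# over character positions, tanh, and two highway layers.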
def feature_dtype(feat):
if requires_vocab(feat):
return tf.int32
elif feat["type"] in {"digit", "punctuation_count", "uppercase"}:
return tf.float32
elif requires_character_convolution(feat):
return tf.int32
else:
raise ValueError("unknown feature %r." % (feat,))
def feature_shape(feature):
if requires_vocab(feature) or feature["type"] in {'digit', 'punctuation_count', 'uppercase'}:
return [None, None]
elif requires_character_convolution(feature):
return [None, None, None]
else:
raise ValueError("unknown feature %r." % (feature,))
def build_inputs(features, objectives, fused, class_weights,
class_weights_clipval):
input_placeholders = []
labels = []
labels_mask = []
labels_class_weights = []
max_output_vocab = max(len(obj["vocab"]) for obj in objectives)
with tf.variable_scope("Inputs"):
is_training = tf.placeholder(tf.bool, [], name="is_training")
tf.add_to_collection(IS_TRAINING, is_training)
for idx, feat in enumerate(features):
input_placeholder = tf.placeholder(
feature_dtype(feat), feature_shape(feat),
name="input_placeholders_%d" % (idx,)
)
input_placeholders.append(input_placeholder)
tf.add_to_collection(INPUT_PLACEHOLDERS, input_placeholder)
if fused:
label_placeholder = tf.placeholder(
tf.int32, [None, None, len(objectives)]
)
labels_mask_placeholder = tf.placeholder(
tf.bool, [None, None, len(objectives)], name="labels_mask"
)
labels.append(label_placeholder)
labels_mask.append(labels_mask_placeholder)
tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)
tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)
if class_weights:
with tf.variable_scope("FusedClassWeights"):
init_class_weights = tf.get_variable(
name="class_weights",
shape=[len(objectives) * max_output_vocab],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
init_class_count = tf.get_variable(
name="class_weights_denominator",
shape=[len(objectives)],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
def update_class_weights():
mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1, len(objectives)]), tf.int64)
updated_cls_weights = tf.scatter_add(
init_class_weights,
tf.reshape(label_placeholder + tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]), [-1]),
tf.reshape(mask_as_ints, [-1])
)
updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints, 0))
# class weight: weight_i = total / class_i
weights = tf.clip_by_value(tf.expand_dims(updated_class_count, 1) /
tf.reshape(updated_cls_weights, [len(objectives), max_output_vocab]),
1e-6, class_weights_clipval)
return tf.cast(weights, tf.float32)
def return_class_weights():
# class weight: weight_i = total / class_i
return tf.cast(
tf.clip_by_value(tf.expand_dims(init_class_count, 1) /
tf.reshape(init_class_weights, [len(objectives), max_output_vocab]),
1e-6, class_weights_clipval), tf.float32)
labels_class_weights.append(
tf.cond(is_training,
update_class_weights,
return_class_weights))
else:
labels_class_weights.append(None)
else:
for objective in objectives:
with tf.variable_scope(objective["name"]):
label_placeholder = tf.placeholder(
tf.int32, [None, None], name="labels"
)
labels.append(label_placeholder)
if objective["type"] == "crf":
labels_mask_placeholder = tf.placeholder(
tf.bool, [None], name="labels_mask"
)
labels_class_weights.append(None)
elif objective["type"] == "softmax":
labels_mask_placeholder = tf.placeholder(
tf.bool, [None, None], name="labels_mask"
)
if class_weights:
init_class_weights = tf.get_variable(
name="class_weights",
shape=len(objective["vocab"]),
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
init_class_count = tf.get_variable(
name="class_weights_denominator",
shape=[],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
def update_class_weights():
mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1]), tf.int64)
updated_cls_weights = tf.scatter_add(
init_class_weights,
tf.reshape(label_placeholder, [-1]),
mask_as_ints
)
updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints))
# class weight: weight_i = total / class_i
weights = tf.clip_by_value(updated_class_count / updated_cls_weights,
1e-6, class_weights_clipval)
return tf.cast(weights, tf.float32)
def return_class_weights():
# class weight: weight_i = total / class_i
return tf.cast(
tf.clip_by_value(init_class_count / init_class_weights,
1e-6, class_weights_clipval), tf.float32)
labels_class_weights.append(
tf.cond(is_training, update_class_weights, return_class_weights)
)
else:
labels_class_weights.append(None)
else:
raise ValueError(
"unknown objective type %r." % (
objective["type"]
)
)
labels_mask.append(labels_mask_placeholder)
tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)
tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)
sequence_lengths = tf.placeholder(tf.int32, [None],
name="sequence_lengths")
tf.add_to_collection(SEQUENCE_LENGTHS, sequence_lengths)
return (input_placeholders,
labels,
labels_mask,
labels_class_weights,
sequence_lengths,
is_training)
def add_weight_noise(x, is_training, stddev):
return tf.cond(is_training,
lambda: x + tf.random_normal(
shape=tf.shape(x), stddev=stddev),
lambda: x)
def build_recurrent(inputs, cudnn, faux_cudnn, hidden_sizes, is_training,
keep_prob, weight_noise):
dtype = tf.float32
if cudnn:
if len(hidden_sizes) == 0:
raise ValueError("hidden_sizes must be a list of length > 1.")
hidden_size = hidden_sizes[0]
if any(hidden_size != hsize for hsize in hidden_sizes):
raise ValueError("cudnn RNN requires all hidden units "
"to be the same size (got %r)" % (
hidden_sizes,
))
num_layers = len(hidden_sizes)
cell_input_size = inputs.get_shape()[-1].value
est_size = estimate_cudnn_parameter_size(
num_layers=num_layers,
hidden_size=hidden_size,
input_size=cell_input_size,
input_mode="linear_input",
direction="bidirectional"
)
# autoswitch to GPUs based on availability of alternatives:
cudnn_params = tf.get_variable("RNNParams",
shape=[est_size],
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer())
if weight_noise > 0:
cudnn_params = add_weight_noise(cudnn_params,
stddev=weight_noise, is_training=is_training)
if faux_cudnn:
cudnn_cell = CpuCudnnLSTM(num_layers,
hidden_size,
cell_input_size,
input_mode="linear_input",
direction="bidirectional")
else:
cpu_cudnn_params(cudnn_params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode="linear_input",
direction="bidirectional")
cudnn_cell = CudnnLSTM(num_layers,
hidden_size,
cell_input_size,
input_mode="linear_input",
direction="bidirectional")
init_state = tf.fill(
(2 * num_layers, tf.shape(inputs)[1], hidden_size),
tf.constant(np.float32(0.0)))
hiddens, output_h, output_c = cudnn_cell(
inputs,
input_h=init_state,
input_c=init_state,
params=cudnn_params,
is_training=True)
hiddens = maybe_dropout(
hiddens,
keep_prob,
is_training)
else:
cell = MultiRNNCell(
[LSTMCell(hsize, is_training=is_training, keep_prob=keep_prob)
for hsize in hidden_sizes]
)
hiddens, _ = bidirectional_dynamic_rnn(
cell,
inputs,
time_major=True,
dtype=dtype,
swap_memory=True
)
return hiddens
def build_embed(inputs, features, index2words, keep_prob, is_training):
embeddings = []
for idx, (values, feature, index2word) in enumerate(zip(inputs, features, index2words)):
if requires_vocab(feature):
with tf.variable_scope("embedding_%d" % (idx,)):
embedding = embedding_lookup(
values,
dim=feature["dimension"],
size=len(index2word),
dtype=tf.float32,
mask_negative=True
)
embeddings.append(embedding)
elif requires_character_convolution(feature):
embeddings.append(
character_convolution(values, feature)
)
else:
embeddings.append(tf.expand_dims(values, 2))
return maybe_dropout(concat(embeddings, axis=2), keep_prob, is_training)
def crf_metrics(unary_scores, labels, transition_params, sequence_lengths,
mask):
"""
Computes CRF output metrics.
Receives:
unary_scores : batch-major order
labels : batch-major order
transition_params : nclasses x nclasses matrix.
sequence_lengths : length of each time-sequence
mask : batch-major example mask
Returns:
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total
"""
classes = unary_scores.get_shape()[-1].value
decoded, scores = viterbi_decode(unary_scores,
transition_params,
sequence_lengths)
tf.add_to_collection(UNARY_SCORES, unary_scores)
tf.add_to_collection(DECODED, decoded)
tf.add_to_collection(DECODED_SCORES, scores)
equals_label = tf.equal(labels, decoded)
token_correct = tf.reduce_sum(
tf.cast(
tf.logical_and(equals_label, mask),
tf.int32
)
)
token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))
tf.add_to_collection(TOKEN_CORRECT, token_correct)
tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)
sentence_correct, _ = compute_sentence_correct(equals_label, mask)
sentence_correct_total = tf.reduce_sum(tf.cast(mask[:, 0], tf.int32))
tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)
tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)
build_true_false_positives(decoded, mask, labels,
classes, equals_label)
return (token_correct, token_correct_total,
sentence_correct, sentence_correct_total)
def build_true_false_positives(decoded, mask_batch_major, labels_batch_major,
classes, equals_label):
masked_equals_label = tf.logical_and(equals_label, mask_batch_major)
# now for each class compute tp, fp, fn
# [nclasses x batch x time]
masked_per_class = tf.logical_and(
tf.equal(labels_batch_major[None, :, :], tf.range(classes)[:, None, None]),
mask_batch_major)
# correct, and on label
correct = tf.reduce_sum(tf.cast(tf.logical_and(masked_per_class, equals_label[None, :, :]), tf.int32),
axis=[1, 2])
# predicted a particular class
guessed = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(decoded[None, :, :], tf.range(classes)[:, None, None]), mask_batch_major), tf.int32),
axis=[1, 2])
total = tf.reduce_sum(tf.cast(masked_per_class, tf.int32), axis=[1, 2])
tp, fp, fn = correct, guessed - correct, total - correct
tf.add_to_collection(TRUE_POSITIVES, tp)
tf.add_to_collection(FALSE_POSITIVES, fp)
tf.add_to_collection(FALSE_NEGATIVES, fn)
def softmax_metrics(unary_scores, labels, mask):
"""
Compute softmax output stats for correct/accuracy per-token/per-sentence.
    Receives:
unary_scores : time-major
labels : time-major
mask : time-major
Returns:
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total
"""
classes = unary_scores.get_shape()[-1].value
unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])
labels_batch_major = tf.transpose(labels, [1, 0])
mask_batch_major = tf.transpose(mask, [1, 0])
decoded = tf.cast(tf.argmax(unary_scores_batch_major, 2), labels.dtype)
unary_probs_batch_major = tf.nn.softmax(unary_scores_batch_major)
scores = tf.reduce_max(unary_probs_batch_major, 2)
tf.add_to_collection(UNARY_SCORES, unary_probs_batch_major)
tf.add_to_collection(DECODED, decoded)
tf.add_to_collection(DECODED_SCORES, scores)
equals_label = tf.equal(decoded, labels_batch_major)
token_correct = tf.reduce_sum(
tf.cast(
tf.logical_and(
equals_label,
mask_batch_major
),
tf.int32
)
)
token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))
tf.add_to_collection(TOKEN_CORRECT, token_correct)
tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)
sentence_correct, sentence_correct_total = compute_sentence_correct(
equals_label, mask_batch_major
)
tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)
tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)
build_true_false_positives(decoded, mask_batch_major, labels_batch_major,
classes, equals_label)
return (token_correct, token_correct_total,
sentence_correct, sentence_correct_total)
def add_objective_names_types(objectives):
for objective in objectives:
with tf.variable_scope(objective["name"]):
# store objective names in graph:
tf.add_to_collection(OBJECTIVE_NAMES,
tf.constant(objective["name"], name="objective_name")
)
tf.add_to_collection(OBJECTIVE_TYPES,
tf.constant(objective["type"], name="objective_type")
)
def build_loss(inputs, objectives, labels, labels_mask,
labels_class_weights, fused, sequence_lengths,
class_weights_normalize):
"""
Compute loss function given the objectives.
Assumes inputs are of the form [time, batch, features].
Arguments:
----------
    inputs : tf.Tensor
    objectives : list<dict>, objective specs
    labels : list<tf.Tensor>
    labels_mask : list<tf.Tensor>
    labels_class_weights : list<tf.Tensor>
    fused : bool, score all objectives with a single output projection
    sequence_lengths : tf.Tensor
    class_weights_normalize : bool, normalize by the weighted number of
        predictions rather than the raw count
Returns:
loss : tf.Tensor (scalar)
"""
losses = []
negative_log_likelihoods = []
sentence_corrects = []
sentence_corrects_total = []
token_corrects = []
token_corrects_total = []
max_output_vocab = max(len(obj["vocab"]) for obj in objectives)
total_output_size = len(objectives) * max_output_vocab
add_objective_names_types(objectives)
if fused:
with tf.variable_scope("FusedOutputs"):
objective_labels = labels[0]
mask = labels_mask[0]
objective_class_weights = labels_class_weights[0]
# perform all classifications at once:
unary_scores = tf.contrib.layers.fully_connected(
inputs, total_output_size,
activation_fn=None
)
unary_scores = tf.reshape(unary_scores,
[tf.shape(unary_scores)[0],
tf.shape(unary_scores)[1],
len(objectives),
max_output_vocab])
negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(
logits=unary_scores,
labels=objective_labels
)
labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)
masked_negative_log_likelihood = negative_log_likelihood * labels_mask_casted
if objective_class_weights is not None:
class_weights_mask = tf.gather(
tf.reshape(objective_class_weights, [-1]),
objective_labels +
tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]))
if class_weights_normalize:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
losses.append(normed_loss)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
for idx, objective in enumerate(objectives):
with tf.variable_scope(objective["name"]):
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = softmax_metrics(unary_scores[:, :, idx, :len(objective["vocab"])],
labels=objective_labels[:, :, idx],
mask=mask[:, :, idx])
token_corrects.append(token_correct)
token_corrects_total.append(token_correct_total)
sentence_corrects.append(sentence_correct)
sentence_corrects_total.append(sentence_correct_total)
else:
for objective, objective_labels, mask, objective_class_weights in zip(objectives, labels, labels_mask, labels_class_weights):
with tf.variable_scope(objective["name"]):
if objective["type"] == "crf":
unary_scores = tf.contrib.layers.fully_connected(
inputs,
len(objective["vocab"]),
activation_fn=None
)
unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])
labels_batch_major = tf.transpose(objective_labels, [1, 0])
padded_unary_scores_batch_major = tf.cond(tf.greater(tf.shape(unary_scores_batch_major)[1], 1),
lambda: unary_scores_batch_major,
lambda: tf.pad(unary_scores_batch_major, [[0, 0], [0, 1], [0, 0]]))
padded_labels_batch_major = tf.cond(tf.greater(tf.shape(labels_batch_major)[1], 1),
lambda: labels_batch_major,
lambda: tf.pad(labels_batch_major, [[0, 0], [0, 1]]))
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
padded_unary_scores_batch_major, padded_labels_batch_major, sequence_lengths
)
labels_mask_casted = tf.cast(mask, log_likelihood.dtype)
masked_log_likelihood = (
log_likelihood * labels_mask_casted
)
masked_negative_log_likelihood_sum = -tf.reduce_sum(masked_log_likelihood)
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
losses.append(masked_negative_log_likelihood_sum / num_predictions)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
sequence_mask = tf.logical_and(
tf.sequence_mask(sequence_lengths),
                        # broadcast the per-sentence mask over the time dimension:
tf.expand_dims(mask, 1)
)
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = crf_metrics(unary_scores_batch_major,
labels=labels_batch_major,
mask=sequence_mask,
transition_params=transition_params,
sequence_lengths=sequence_lengths)
elif objective["type"] == 'softmax':
unary_scores = tf.contrib.layers.fully_connected(
inputs,
len(objective["vocab"]),
activation_fn=None
)
negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(
logits=unary_scores,
labels=objective_labels
)
labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)
masked_negative_log_likelihood = (
negative_log_likelihood * labels_mask_casted
)
if objective_class_weights is not None:
class_weights_mask = tf.gather(objective_class_weights, objective_labels)
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
if class_weights_normalize:
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
else:
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
losses.append(normed_loss)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = softmax_metrics(unary_scores,
labels=objective_labels,
mask=mask)
else:
raise ValueError(
"unknown objective type %r" % (objective["type"],)
)
token_corrects.append(token_correct)
token_corrects_total.append(token_correct_total)
sentence_corrects.append(sentence_correct)
sentence_corrects_total.append(sentence_correct_total)
# aggregate metrics for all objectives:
total_loss = tf.reduce_sum(sum_list(losses))
tf.summary.scalar("BatchLoss", total_loss)
neg_log_likelihood_total = sum_list(negative_log_likelihoods)
tf.summary.scalar("BatchNLL", neg_log_likelihood_total)
tf.add_to_collection(NLL, neg_log_likelihood_total)
tf.add_to_collection(NLL_TOTAL, tf.shape(inputs)[1])
sentence_corrects_total = sum_list(sentence_corrects_total)
sentence_corrects = sum_list(sentence_corrects)
tf.add_to_collection(SENTENCE_CORRECT_ALL, sentence_corrects)
tf.add_to_collection(SENTENCE_CORRECT_ALL_TOTAL, sentence_corrects_total)
token_corrects_total = sum_list(token_corrects_total)
token_corrects = sum_list(token_corrects)
tf.add_to_collection(TOKEN_CORRECT_ALL, token_corrects)
tf.add_to_collection(TOKEN_CORRECT_ALL_TOTAL, token_corrects_total)
return total_loss
def build_model(name,
trainable,
features,
feature_index2words,
objectives,
keep_prob,
input_keep_prob,
hidden_sizes,
freeze_rate,
freeze_rate_anneal,
solver,
cudnn,
fused,
faux_cudnn,
class_weights,
class_weights_normalize,
class_weights_clipval,
lr,
weight_noise,
anneal_rate,
clip_norm):
# mixed output fusing is currently unsupported
if fused and any(obj["type"] != "softmax" for obj in objectives):
raise ValueError("cannot fuse outputs and use non-softmax output.")
    # clear all existing collections to ensure every new collection
    # is created fresh
graph = tf.get_default_graph()
for collection_name in graph.get_all_collection_keys():
graph.clear_collection(collection_name)
# build a model under the model's name to prevent collisions
# when multiple models are restored simultaneously
with tf.variable_scope(name):
global_step = tf.Variable(0, trainable=False, name="global_step")
tf.add_to_collection(GLOBAL_STEP, global_step)
# model placeholders:
(input_placeholders,
labels,
labels_mask,
labels_class_weights,
sequence_lengths,
is_training) = build_inputs(features,
objectives=objectives,
fused=fused,
class_weights=class_weights,
class_weights_clipval=class_weights_clipval)
embed = build_embed(input_placeholders,
features=features,
index2words=feature_index2words,
is_training=is_training,
keep_prob=input_keep_prob)
hiddens = embed
if len(hidden_sizes) > 0:
hiddens = build_recurrent(hiddens,
cudnn=cudnn,
faux_cudnn=faux_cudnn,
hidden_sizes=hidden_sizes,
keep_prob=keep_prob,
weight_noise=weight_noise,
is_training=is_training)
loss = build_loss(hiddens,
objectives=objectives,
fused=fused,
labels=labels,
labels_mask=labels_mask,
labels_class_weights=labels_class_weights,
class_weights_normalize=class_weights_normalize,
sequence_lengths=sequence_lengths)
if trainable:
learning_rate = tf.train.exponential_decay(lr, global_step,
33000, anneal_rate, staircase=True)
if solver == "adam":
optimizer = LazyAdamOptimizer(learning_rate)
elif solver == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError("Unknown solver %r." % (solver))
grad_vars = optimizer.compute_gradients(loss)
if clip_norm > 0:
grad_vars = [(grad if isinstance(grad, tf.IndexedSlices) else tf.clip_by_norm(grad, clip_norm), var) for grad, var in grad_vars]
train_op = optimizer.apply_gradients(grad_vars, global_step=global_step)
else:
train_op = tf.no_op()
tf.add_to_collection(TRAIN_OP, train_op)
tf.add_to_collection(TRAIN_SUMMARIES, tf.summary.merge_all())
def restore_session(session,
path,
replace_to=None,
replace_from=None,
verbose=False,
use_metagraph=True,
only_features=False):
"""
Call restore on tf.train.Saver on a specific path to store all the
variables of the current tensorflow session to a file for later restoring.
Arguments:
session : tf.Session
path : str, place containing the session data to restore
verbose : bool, print status messages.
use_metagraph : bool, restore by re-creating saved metagraph.
Returns:
bool : success or failure of the restoration
"""
makedirs(path, exist_ok=True)
if not path.endswith("/"):
path = path + "/"
checkpoint = tf.train.get_checkpoint_state(path)
if verbose:
print("Looking for saved session under %r" % (path,), flush=True)
if checkpoint is None or checkpoint.model_checkpoint_path is None:
if verbose:
print("No saved session found", flush=True)
return False
fname = basename(checkpoint.model_checkpoint_path)
if verbose:
print("Restoring saved session from %r" % (join(path, fname),), flush=True)
if use_metagraph:
param_saver = tf.train.import_meta_graph(join(path, fname + ".meta"),
clear_devices=True)
missing_vars = []
else:
if only_features:
to_restore = {}
whitelist = ["embedding", "/RNN/", "/RNNParams", "CharacterConvolution", "HighwayLayer"]
for var in tf.global_variables():
if any(keyword in var.name for keyword in whitelist):
to_restore[var.name[:-2]] = var
            param_saver = tf.train.Saver(to_restore)
            missing_vars = []
else:
if replace_to is not None and replace_from is not None:
to_restore = {}
for var in tf.global_variables():
var_name = var.name[:var.name.rfind(":")]
old_name = var_name.replace(replace_to, replace_from)
to_restore[old_name] = var
param_saver = tf.train.Saver(to_restore)
missing_vars = []
else:
reader = tf.train.NewCheckpointReader(join(path, fname))
saved_shapes = reader.get_variable_to_shape_map()
found_vars = [var for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes]
missing_vars = [var for var in tf.global_variables()
if var.name.split(':')[0] not in saved_shapes]
param_saver = tf.train.Saver(found_vars)
param_saver.restore(session, join(path, fname))
session.run([var.initializer for var in missing_vars])
return True
def bidirectional_dynamic_rnn(cell, inputs, dtype, time_major=True, swap_memory=False):
with tf.variable_scope("forward"):
out_fwd, final_fwd = tf.nn.dynamic_rnn(
cell,
inputs,
time_major=time_major,
dtype=dtype,
swap_memory=swap_memory
)
if time_major:
reverse_axis = 0
else:
reverse_axis = 1
with tf.variable_scope("backward"):
out_bwd, final_bwd = tf.nn.dynamic_rnn(
cell,
reverse(inputs, axis=reverse_axis),
time_major=time_major,
dtype=dtype,
swap_memory=swap_memory
)
out_bwd = reverse(out_bwd, axis=reverse_axis)
return concat([out_fwd, out_bwd], axis=2), (final_fwd, final_bwd)
def get_embedding_lookup(size, dim, dtype, reuse=None, trainable=True):
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
W = tf.get_variable(
name="embedding",
shape=[size, dim],
dtype=dtype,
initializer=tf.random_uniform_initializer(
-1.0 / math.sqrt(dim),
1.0 / math.sqrt(dim)
),
trainable=trainable
)
return W
def embedding_lookup(inputs,
size,
dim,
dtype,
reuse=None,
mask_negative=False,
trainable=True,
place_on_cpu_if_big=True):
"""
Construct an Embedding layer that gathers
elements from a matrix with `size` rows,
and `dim` features using the indices stored in `x`.
Arguments:
----------
inputs : tf.Tensor, of integer type
size : int, how many symbols in the lookup table
dim : int, how many columns per symbol.
dtype : data type for the lookup table (e.g. tf.float32)
reuse : bool, (default None) whether the lookup table
was already used before (thus this is weight sharing).
mask_negative : bool, (default False) should -1s in the
lookup input indicate padding (e.g. no lookup),
and thus should those values be masked out post-lookup.
trainable : bool (default True), whether the parameters of
this lookup table can be backpropagated into (e.g.
for Glove word vectors that are fixed pre-trained, this
can be set to False).
place_on_cpu_if_big : bool, if matrix is big, store it on cpu.
Returns:
--------
tf.Tensor, result of tf.nn.embedding_lookup(LookupTable, inputs)
"""
    if place_on_cpu_if_big and size * dim > 1000000:
        # assumption: treat lookup tables with more than ~1M entries as "big"
        # and pin them to the CPU to save GPU memory (the exact threshold is
        # a heuristic, not taken from elsewhere in this module).
        with tf.device("/cpu:0"):
            W = get_embedding_lookup(size, dim, dtype, reuse, trainable=trainable)
    else:
        W = get_embedding_lookup(size, dim, dtype, reuse, trainable=trainable)
if mask_negative:
embedded = tf.nn.embedding_lookup(W, tf.maximum(inputs, 0))
null_mask = tf.expand_dims(
tf.cast(
tf.not_equal(inputs, -1),
dtype
),
-1
)
return embedded * null_mask
else:
return tf.nn.embedding_lookup(W, inputs)
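# Illustrative behaviour of mask_negative (hypothetical values):
#
#   inputs = [[2, 5, -1],
#             [7, -1, -1]]        # -1 marks padding positions
#   embedding_lookup(inputs, size=10, dim=4, dtype=tf.float32,
#                    mask_negative=True)
#
# returns a [2, 3, 4] tensor whose rows at the padded positions are all
# zeros; the gather itself never goes out of range because -1 is clamped
# to 0 before the lookup.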
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(
tf.get_variable(
name + "_%d" % i,
[current_size] + shape[1:],
dtype=dtype
)
)
return shards
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = tf.get_variable_scope().name + "/" + concat_name + ":0"
for value in tf.get_collection(tf.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
    concat_variable = tf.concat(sharded_variable, 0, name=concat_name)
tf.add_to_collection(tf.GraphKeys.CONCATENATED_VARIABLES, concat_variable)
return concat_variable
class SequenceModel(object):
def __init__(self,
objectives,
features,
feature_index2words,
hidden_sizes,
keep_prob,
lr,
solver,
seed=1234,
input_keep_prob=0.7,
clip_norm=-1,
name="SequenceTagger",
cudnn=False,
anneal_rate=0.99,
trainable=True,
weight_noise=0.0,
class_weights_normalize=False,
faux_cudnn=False,
class_weights=False,
class_weights_clipval=1000.0,
freeze_rate=1.0,
fused=False,
freeze_rate_anneal=0.8,
create_variables=True):
if fused and objectives[0]["type"] == "crf":
fused = False
self.keep_prob = keep_prob
self.input_keep_prob = input_keep_prob
self.hidden_sizes = hidden_sizes
self.name = name
self.objectives = objectives
self.features = features
self.feature_index2words = feature_index2words
self.seed = seed
self.lr = lr
self.fused = fused
self.weight_noise = weight_noise
self.anneal_rate = anneal_rate
self.clip_norm = clip_norm
self.solver = solver
self.class_weights_normalize = class_weights_normalize
self.class_weights = class_weights
self.class_weights_clipval = class_weights_clipval
self.rng = np.random.RandomState(seed)
self.cudnn = cudnn
self.feature_word2index = [
{w: k for k, w in enumerate(index2word)} if index2word is not None else None
for index2word in self.feature_index2words
]
self.label2index = [
{w: k for k, w in enumerate(objective["vocab"])}
for objective in self.objectives
]
if create_variables:
# 1) build graph here (TF functional code pattern)
build_model(name=self.name,
trainable=trainable,
objectives=self.objectives,
features=self.features,
feature_index2words=self.feature_index2words,
hidden_sizes=self.hidden_sizes,
keep_prob=self.keep_prob,
solver=self.solver,
freeze_rate=freeze_rate,
class_weights_normalize=self.class_weights_normalize,
class_weights=self.class_weights,
class_weights_clipval=self.class_weights_clipval,
freeze_rate_anneal=freeze_rate_anneal,
cudnn=self.cudnn,
lr=self.lr,
fused=self.fused,
weight_noise=self.weight_noise,
anneal_rate=self.anneal_rate,
input_keep_prob=self.input_keep_prob,
faux_cudnn=faux_cudnn,
clip_norm=self.clip_norm)
# 2) and use meta graph to recover these fields:
self.recover_graph_variables()
def recover_graph_variables(self):
"""Use TF meta graph to obtain key metrics
and outputs from model."""
self.labels = tf.get_collection(LABEL_PLACEHOLDERS)
self.labels_mask = tf.get_collection(LABEL_MASK_PLACEHOLDERS)
self.input_placeholders = tf.get_collection(INPUT_PLACEHOLDERS)
self.sequence_lengths = tf.get_collection(SEQUENCE_LENGTHS)[0]
self.decoded = tf.get_collection(DECODED)
self.decoded_scores = tf.get_collection(DECODED_SCORES)
self.unary_scores = tf.get_collection(UNARY_SCORES)
self.token_correct = tf.get_collection(TOKEN_CORRECT)
self.token_correct_total = tf.get_collection(TOKEN_CORRECT_TOTAL)
self.sentence_correct = tf.get_collection(SENTENCE_CORRECT)
self.sentence_correct_total = tf.get_collection(SENTENCE_CORRECT_TOTAL)
self.token_correct_all = tf.get_collection(TOKEN_CORRECT_ALL)[0]
self.token_correct_all_total = tf.get_collection(TOKEN_CORRECT_ALL_TOTAL)[0]
self.sentence_correct_all = tf.get_collection(SENTENCE_CORRECT_ALL)[0]
self.sentence_correct_all_total = tf.get_collection(SENTENCE_CORRECT_ALL_TOTAL)[0]
self.true_positives = tf.get_collection(TRUE_POSITIVES)
self.false_positives = tf.get_collection(FALSE_POSITIVES)
self.false_negatives = tf.get_collection(FALSE_NEGATIVES)
if len(self.true_positives) == 0 and len(self.token_correct) != 0:
self.true_positives = [None for _ in self.token_correct]
self.false_positives = [None for _ in self.token_correct]
self.false_negatives = [None for _ in self.token_correct]
if len(tf.get_collection(GLOBAL_STEP)) > 0:
self.global_step = tf.get_collection(GLOBAL_STEP)[0]
else:
try:
self.global_step = tf.get_default_graph().get_tensor_by_name(
self.name + "/" + "global_step:0")
except KeyError:
self.global_step = tf.Variable(0, trainable=False, name="global_step")
tf.add_to_collection(GLOBAL_STEP, self.global_step)
self.is_training = tf.get_collection(IS_TRAINING)[0]
self.noop = tf.no_op()
self.train_op = tf.get_collection(TRAIN_OP)[0]
train_summaries = tf.get_collection(TRAIN_SUMMARIES)
self.train_summaries = train_summaries[0] if len(train_summaries) > 0 else None
self.nll = tf.get_collection(NLL)[0]
self.nll_total = tf.get_collection(NLL_TOTAL)[0]
self.saver = tf.train.Saver()
@classmethod
def overrideable_fields(cls):
return [
"keep_prob",
"name",
"lr",
"clip_norm",
"class_weights_normalize",
"class_weights_clipval",
"cudnn",
"anneal_rate",
"weight_noise",
"input_keep_prob"
]
@classmethod
def fields_to_save(cls):
return [
"hidden_sizes",
"objectives",
"name",
"cudnn",
"class_weights",
"features",
"fused",
"class_weights_normalize",
"weight_noise",
"anneal_rate",
"feature_index2words",
"solver",
"lr",
"clip_norm",
"keep_prob",
"input_keep_prob",
"class_weights_clipval"
]
def predict(self, session, feed_dict):
feed_dict[self.is_training] = False
outputs, outputs_probs = session.run(
(self.decoded, self.decoded_scores), feed_dict
)
predictions_out = {}
for value, val_prob, objective in zip(outputs, outputs_probs, self.objectives):
predictions_out[objective["name"]] = (value, val_prob)
return predictions_out
def predict_proba(self, session, feed_dict):
feed_dict[self.is_training] = False
outputs = session.run(
self.unary_scores, feed_dict
)
predictions_out = {}
for value, objective in zip(outputs, self.objectives):
predictions_out[objective["name"]] = value
return predictions_out
def save(self, session, path):
makedirs(path, exist_ok=True)
with open(join(path, "model.json"), "wt") as fout:
save_dict = {}
for field in type(self).fields_to_save():
save_dict[field] = getattr(self, field)
json.dump(save_dict, fout)
with open(join(path, "rng.pkl"), "wb") as fout:
pickle.dump(self.rng, fout)
save_session(session, self.saver, path, verbose=True)
@classmethod
def load(cls, session, path, args=None, verbose=True, trainable=True,
rebuild_graph=False, faux_cudnn=False, replace_to=None, replace_from=None):
"""Convenience method for using a tensorflow session to reload
a previously saved + serialized model from disk."""
with open(join(path, "model.json"), "rt") as fin:
model_props = json.load(fin)
# update fields based on CLI:
if args is not None:
ex_fields = explicitly_set_fields()
for field in cls.overrideable_fields():
if field in ex_fields:
model_props[field] = getattr(args, field)
# prune old fields based on changes to saveable fields:
relevant_props = {}
for field in cls.fields_to_save():
if field in model_props:
relevant_props[field] = model_props[field]
relevant_props["trainable"] = trainable
relevant_props["faux_cudnn"] = faux_cudnn
if rebuild_graph:
print("Using rebuild_graph mode: creating a new graph.", flush=True)
relevant_props["create_variables"] = True
model = cls(**relevant_props)
restore_session(
session, path,
replace_to=replace_to,
replace_from=replace_from,
verbose=verbose,
use_metagraph=False
)
else:
if model_props.get("cudnn", False):
import tensorflow.contrib.cudnn_rnn
relevant_props["create_variables"] = False
restore_session(
session, path,
verbose=verbose,
use_metagraph=True
)
model = cls(**relevant_props)
rng_path = join(path, "rng.pkl")
if exists(rng_path):
# apply the saved random number generator to this
# model:
with open(rng_path, "rb") as fin:
model.rng = pickle.load(fin)
return model
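# Illustrative usage sketch for SequenceModel (the path is hypothetical; the
# feed_dict construction lives in iter_batches_single_threaded elsewhere in
# this module):
#
#   with tf.Session() as session:
#       model = SequenceModel.load(session, "/path/to/saved_model",
#                                  trainable=False)
#       predictions = model.predict(session, feed_dict)
#       # predictions maps each objective name to (decoded labels, scores)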
def make_path_absolute(obj, basepath):
copied = obj.copy()
for key in ["path", "vocab"]:
if key in copied:
copied[key] = join(basepath, copied[key])
return copied
class Config(object):
def __init__(self, datasets, features, objectives,
wikidata_path, classification_path):
assert(len(features) > 0)
self.datasets = datasets
self.features = features
self.objectives = objectives
self.classifications = None
self.wikidata_path = wikidata_path
self.classification_path = classification_path
# build the objective names:
self._named_objectives = [obj["name"] for obj in self.objectives]
@classmethod
def load(cls, path):
with open(path, "rt") as fin:
config = json.load(fin)
config_dirname = dirname(path)
return cls(
datasets=[make_path_absolute(dataset, config_dirname) for dataset in config['datasets']],
features=[make_path_absolute(feat, config_dirname) for feat in config['features']],
objectives=[make_path_absolute(objective, config_dirname) for objective in config['objectives']],
wikidata_path=config.get("wikidata_path", None),
classification_path=(
join(config_dirname, config.get("classification_path", None))
if "classification_path" in config else None)
)
def load_dataset_separate(self, dataset_type):
paths = [dataset for dataset in self.datasets if dataset["type"] == dataset_type]
all_examples = {}
for dataset in paths:
_, extension = splitext(dataset["path"])
if extension == ".h5" or extension == ".hdf5":
if self.classifications is None:
if self.wikidata_path is None or self.classification_path is None:
raise ValueError("missing wikidata_path and "
"classification_path, cannot "
"construct H5Dataset.")
self.classifications = ClassificationHandler(
self.wikidata_path,
self.classification_path
)
examples = H5Dataset(
dataset["path"],
dataset["x"],
dataset["y"],
self._named_objectives,
ignore_value=dataset.get('ignore', None),
classifications=self.classifications)
else:
examples = TSVDataset(
dataset["path"],
dataset["x"],
dataset["y"],
self._named_objectives,
comment=dataset.get('comment', '#'),
ignore_value=dataset.get('ignore', None),
retokenize=dataset.get('retokenize', False))
title = dataset["path"].split('/')[-1].split(".")[0]
name = title
iteration = 1
while name in all_examples:
name = title + "-%d" % (iteration,)
iteration += 1
all_examples[name] = examples
return all_examples
def load_dataset(self, dataset_type, merge=True):
datasets = self.load_dataset_separate(dataset_type)
if merge:
return CombinedDataset(list(datasets.values()))
return datasets
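# Illustrative config file for Config.load (all paths and values are made
# up; only keys that are actually read here are shown, and the exact schema
# of each dataset's "x"/"y" fields is defined by TSVDataset/H5Dataset):
#
#   {
#     "datasets": [{"type": "train", "path": "train.tsv", "x": ..., "y": ...},
#                  {"type": "dev",   "path": "dev.tsv",   "x": ..., "y": ...}],
#     "features": [{"type": ..., "dimension": 200}],
#     "objectives": [{"name": "tags", "type": "softmax"}],
#     "wikidata_path": "wikidata",
#     "classification_path": "classifications"
#   }
#
# Relative paths are resolved against the directory containing the config
# file via make_path_absolute.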
def boolean_argument(parser, name, default):
parser.add_argument("--" + name, action="store_true", default=default)
parser.add_argument("--no" + name, action="store_false", dest=name)
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--anneal_rate', type=float, default=0.99)
parser.add_argument('--clip_norm', type=float, default=-1)
parser.add_argument('--weight_noise', type=float, default=0.0)
parser.add_argument('--hidden_sizes', type=int, nargs="*", default=[200, 200])
parser.add_argument('--load_dir', type=str, default=None)
parser.add_argument('--restore_input_features', type=str, default=None)
parser.add_argument('--improvement_key', type=str, default="token_correct")
parser.add_argument('--freeze_rate', type=float, default=1.0)
parser.add_argument('--freeze_rate_anneal', type=float, default=0.8)
parser.add_argument('--save_dir', type=str, default=None)
parser.add_argument('--max_epochs', type=int, default=1000)
parser.add_argument('--test_every', type=int, default=10000,
help="Number of training iterations after which testing should occur.")
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--max_patience', type=int, default=10)
parser.add_argument('--class_weights_clipval', type=float, default=1000.0)
parser.add_argument('--device', type=str, default="gpu:0")
parser.add_argument('--keep_prob', type=float, default=0.5)
parser.add_argument('--input_keep_prob', type=float, default=0.7)
parser.add_argument('--solver', type=str, default="adam",
choices=["adam", "sgd"])
parser.add_argument("--name", type=str, default="SequenceTagger")
parser.add_argument("--old_name", type=str, default=None)
boolean_argument(parser, "cudnn", True)
boolean_argument(parser, "faux_cudnn", False)
boolean_argument(parser, "class_weights", False)
boolean_argument(parser, "rebuild_graph", False)
boolean_argument(parser, "class_weights_normalize", False)
boolean_argument(parser, "fused", True)
boolean_argument(parser, "report_metrics_per_axis", True)
boolean_argument(parser, "report_class_f1", False)
return parser.parse_args(args=args)
def get_vocab(dataset, max_vocab=-1, extra_words=None):
index2word = []
occurrence = {}
for el in dataset:
if el not in occurrence:
index2word.append(el)
occurrence[el] = 1
else:
occurrence[el] += 1
index2word = sorted(index2word, key=lambda x: occurrence[x], reverse=True)
if max_vocab > 0:
index2word = index2word[:max_vocab]
if extra_words is not None:
index2word = extra_words + index2word
return index2word
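# Illustrative example:
#
#   get_vocab(["cat", "dog", "cat", "bird", "cat", "dog"],
#             max_vocab=2, extra_words=["<UNK>"])
#   # -> ["<UNK>", "cat", "dog"]
#
# (sorted by descending frequency, truncated to max_vocab, then prefixed
# with the extra symbols; "<UNK>" is just a hypothetical placeholder token.)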
def get_objectives(objectives, dataset):
out = []
for obj_idx, objective in enumerate(objectives):
if "vocab" in objective:
with open(objective["vocab"], "rt") as fin:
vocab = fin.read().splitlines()
else:
vocab = get_vocab((w[obj_idx] for _, y in dataset for w in y if w[obj_idx] is not None), -1)
out.append(
{
"vocab": vocab,
"type": objective["type"],
"name": objective["name"]
}
)
return out
def merge_all_metrics(metrics):
out = {}
for key, metric in metrics.items():
for subkey, submetric in metric.items():
if len(key) > 0:
out[key + "_" + subkey] = submetric
if subkey not in out:
out[subkey] = submetric
else:
out[subkey] += submetric
else:
out[subkey] = submetric
return out
def log_outcome(logger, outcome, step, name):
for k, v in sorted(outcome.items()):
if "total" in k:
continue
else:
total = outcome[k + "_total"]
if total == 0:
continue
logger.log(k, v / total, step=step)
logger.writer.flush()
def compute_f1(metrics, objectives, report_class_f1):
total_f1 = 0.0
total_precision = 0.0
total_recall = 0.0
total = 0
for objective in objectives:
name = objective["name"]
key = "%s_true_positives" % (name,)
if key not in metrics:
continue
tp = metrics[key]
fp = metrics["%s_false_positives" % (name,)]
fn = metrics["%s_false_negatives" % (name,)]
del metrics[key]
del metrics["%s_false_positives" % (name,)]
del metrics["%s_false_negatives" % (name,)]
precision = 1.* tp / np.maximum((tp + fp), 1e-6)
recall = 1. * tp / np.maximum((tp + fn), 1e-6)
f1 = 2.0 * precision * recall / np.maximum((precision + recall), 1e-6)
support = tp + fn
full_f1 = np.average(f1, weights=support) * 100.0
full_recall = np.average(recall, weights=support) * 100.0
full_precision = np.average(precision, weights=support) * 100.0
total_f1 += full_f1
total_recall += full_recall
total_precision += full_precision
total += 1
if report_class_f1:
print("F1 %s: %r" % (name, full_f1))
print("Name\tF1\tTP\tFP\tFN")
rows = zip([label for label, has_support in zip(objective["vocab"],
support > 0)
if has_support],
f1, tp, fp, fn)
for val, f1_val, val_tp, val_fp, val_fn in rows:
print("%s\t%r\t%d\t%d\t%d" % (
val, f1_val, val_tp, val_fp, val_fn))
print("")
if total > 0:
metrics["F1"] = total_f1
metrics["recall"] = total_recall
metrics["precision"] = total_precision
metrics["F1_total"] = total
metrics["recall_total"] = total
metrics["precision_total"] = total
def accuracy(model, session, datasets, batch_size, train,
report_metrics_per_axis, report_class_f1,
callback=None,
callback_period=None, writer=None):
pbar = get_progress_bar("train" if train else "validation", item="batches")
if not isinstance(datasets, dict):
datasets = {'':datasets}
all_metrics_agg = {}
if callback is not None:
if callback_period is None:
raise ValueError("callback_period cannot be None if "
"callback is used.")
else:
callback_period = None
if train:
train_op = model.train_op
else:
train_op = model.noop
is_training = model.is_training
metrics = {"nll": model.nll, "nll_total": model.nll_total}
summaries = []
if not train:
metric_iter = zip(
model.objectives,
model.token_correct,
model.token_correct_total,
model.sentence_correct,
model.sentence_correct_total,
model.true_positives,
model.false_positives,
model.false_negatives
)
for metric_vars in metric_iter:
(
objective,
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total,
true_positives,
false_positives,
false_negatives
) = metric_vars
name = objective["name"]
if report_metrics_per_axis:
metrics["%s_token_correct" % (name,)] = token_correct
metrics["%s_token_correct_total" % (name,)] = token_correct_total
metrics["%s_sentence_correct" % (name,)] = sentence_correct
metrics["%s_sentence_correct_total" % (name,)] = sentence_correct_total
if true_positives is not None:
metrics["%s_true_positives" % (name,)] = true_positives
metrics["%s_false_positives" % (name,)] = false_positives
metrics["%s_false_negatives" % (name,)] = false_negatives
metrics["token_correct"] = model.token_correct_all
metrics["token_correct_total"] = model.token_correct_all_total
metrics["sentence_correct"] = model.sentence_correct_all
metrics["sentence_correct_total"] = model.sentence_correct_all_total
summaries = []
else:
if writer is not None and model.train_summaries is not None:
summaries = model.train_summaries
metrics_values = [v for _, v in sorted(metrics.items())]
metrics_names = [name for name, _ in sorted(metrics.items())]
outputs_val = [train_op, model.global_step, summaries, metrics_values]
for title, dataset in datasets.items():
batches = iter_batches_single_threaded(
model=model,
dataset=dataset,
batch_size=batch_size,
train=train,
pbar=pbar
)
metrics_agg = {}
iteration = 0
for feed_dict in batches:
feed_dict[is_training] = train
_, step, summary_out, outputs = session.run(outputs_val, feed_dict)
if writer is not None:
writer.add_summary(summary_out, step)
for key, value in zip(metrics_names, outputs[:len(metrics_names)]):
if key not in metrics_agg:
metrics_agg[key] = value
else:
metrics_agg[key] += value
iteration += 1
if callback_period is not None and iteration % callback_period == 0:
callback(iteration)
if np.isnan(metrics_agg['nll']):
print("loss is NaN.", flush=True, file=sys.stderr)
sys.exit(1)
compute_f1(metrics_agg, model.objectives, report_class_f1)
all_metrics_agg[title] = metrics_agg
del batches
return merge_all_metrics(all_metrics_agg)
def present_outcome(outcome, epoch, name):
string_rows = []
for k, v in sorted(outcome.items()):
if "total" in k:
continue
else:
total = outcome[k + "_total"]
if total == 0:
continue
if "correct" in k:
string_rows.append(
[
k,
"%.2f%%" % (100.0 * v / total),
"(%d correct / %d)" % (v, total)
]
)
else:
string_rows.append(
[
k,
"%.3f" % (v / total),
""
]
)
max_len_cols = [
max(len(row[colidx]) for row in string_rows)
for colidx in range(len(string_rows[0]))
] if len(string_rows) > 0 else []
rows = []
for row in string_rows:
rows.append(
" ".join(
[col + " " * (max_len_cols[colidx] - len(col))
for colidx, col in enumerate(row)]
)
)
return "\n".join(["Epoch {epoch}: {name}".format(epoch=epoch, name=name)] + rows)
def print_outcome(outcome, objectives, epoch, step, name, logger=None):
outcome_report = present_outcome(outcome, epoch, name)
if logger is not None:
log_outcome(logger, outcome, step, name)
print(outcome_report)
class SequenceTagger(object):
def __init__(self, path, device="gpu", faux_cudnn=False, rebuild_graph=False):
tf.reset_default_graph()
session_conf = tf.ConfigProto(
allow_soft_placement=True
)
self.session = tf.InteractiveSession(config=session_conf)
with tf.device(device):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
self._model = SequenceModel.load(
self.session,
path,
args=None,
verbose=False,
trainable=False,
rebuild_graph=rebuild_graph,
faux_cudnn=faux_cudnn
)
@property
def objectives(self):
return self._model.objectives
def predict_proba(self, tokens):
blank_labels = tuple(None for _ in self._model.objectives)
batches = list(iter_batches_single_threaded(
model=self._model,
dataset=[
(tokens, [blank_labels for t in tokens])
],
batch_size=1,
train=False,
autoresize=False
))
        batches[0][self._model.is_training] = False
        # with batch_size=1 and a single example there is exactly one batch.
        return self._model.predict_proba(self.session, batches[0])
def predict_proba_sentences(self, sentences):
blank_labels = tuple(None for _ in self._model.objectives)
batches = iter_batches_single_threaded(
model=self._model,
dataset=[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
)
for batch in batches:
batch[self._model.is_training] = False
yield self._model.predict_proba(
self.session, batch
)
def predict_topk_sentences(self, sentences, k=5):
blank_labels = tuple(None for _ in self._model.objectives)
batches = iter_batches_single_threaded(
model=self._model,
dataset=[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
)
        sentence_idx = 0
        for batch in batches:
            outputs = self._model.predict_proba(
                self.session, batch
            )
            named_outputs = {}
            nsentences = 0
            for objective in self._model.objectives:
                obj_name = objective["name"]
                tags, scores = outputs[obj_name]
                nsentences = len(tags)
                # match this batch's predictions with the sentences it was built from:
                batch_sentences = sentences[sentence_idx:sentence_idx + nsentences]
                if objective["type"] == "crf":
                    named_outputs[obj_name] = [
                        [(token, [objective["vocab"][tag]], [sent_score])
                         for token, tag in zip(tokens, sent_tags)]
                        for tokens, sent_tags, sent_score in zip(batch_sentences, tags, scores)
                    ]
                elif objective["type"] == 'softmax':
                    all_sent_scores = []
                    for tokens, sent_token_scores in zip(batch_sentences, scores):
                        sent_scores = []
                        for token, token_scores in zip(tokens, sent_token_scores):
                            topk = np.argsort(token_scores)[::-1][:k]
                            sent_scores.append(
                                (
                                    token,
                                    [objective["vocab"][idx] for idx in topk],
                                    [token_scores[idx] for idx in topk]
                                )
                            )
                        all_sent_scores.append(sent_scores)
                    named_outputs[obj_name] = all_sent_scores
                else:
                    raise ValueError("unknown objective type %r." % (objective["type"],))
            sentence_idx += nsentences
            yield named_outputs
def tag_sentences(self, sentences):
if len(sentences) == 0:
return {
objective["name"]: []
for objective in self._model.objectives
}
blank_labels = tuple(None for _ in self._model.objectives)
batches = list(iter_batches_single_threaded(
self._model,
[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
))
named_outputs = {}
sentence_idx = 0
for batch in batches:
outputs = self._model.predict(self.session, batch)
for objective in self._model.objectives:
obj_name = objective["name"]
if obj_name not in named_outputs:
named_outputs[obj_name] = []
tags, scores = outputs[obj_name]
nsentences = len(tags)
if objective["type"] == "crf":
named_outputs[obj_name].extend([
[(token, objective["vocab"][tag], score) for token, tag in zip(tokens, tags)]
for tokens, tags, score in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)
])
elif objective["type"] == 'softmax':
named_outputs[obj_name].extend([
[(token, objective["vocab"][tag], score)
for token, tag, score in zip(tokens, tags, scores)]
for tokens, tags, scores in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)
])
else:
raise ValueError("unknown objective type %r." % (objective["type"],))
sentence_idx += nsentences
return named_outputs
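# Example SequenceTagger usage (hypothetical model path; a sketch, not part of
# the training script):
#
#   tagger = SequenceTagger("/path/to/saved_model", device="cpu")
#   tagged = tagger.tag_sentences([["The", "cat", "sat", "."]])
#   # `tagged` maps each objective name to, per sentence, a list of
#   # (token, predicted_label, score) triples.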
def count_number_of_parameters():
return int(sum([np.prod(var.get_shape().as_list())
for var in tf.trainable_variables()]))
class TestCallback(object):
def __init__(self, model, session, dataset, epoch, args, logger):
self.model = model
self.session = session
self.dataset = dataset
self.epoch = epoch
self.args = args
self.logger = logger
self.report_metrics_per_axis = args.report_metrics_per_axis
self.report_class_f1 = args.report_class_f1
def test(self, iteration):
dev_outcome = accuracy(self.model, self.session, self.dataset, self.args.batch_size,
train=False, report_metrics_per_axis=self.report_metrics_per_axis,
report_class_f1=self.report_class_f1)
print_outcome(dev_outcome, self.model.objectives,
epoch="{}-{}".format(self.epoch, iteration),
step=self.session.run(self.model.global_step),
name="validation",
logger=self.logger
)
if self.args.save_dir is not None:
self.model.save(self.session, self.args.save_dir)
def compute_epoch(session, model, train_set,
validation_set, test_callback, epoch,
train_writer, test_writer,
args):
test_callback.epoch = epoch
train_outcome = accuracy(model,
session,
train_set,
args.batch_size,
train=True,
callback_period=args.test_every,
writer=train_writer.writer if train_writer is not None else None,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1,
callback=test_callback.test)
global_step = session.run(model.global_step)
print_outcome(train_outcome,
model.objectives,
epoch=epoch,
name="train",
step=global_step,
logger=train_writer)
dev_outcome = accuracy(
model, session, validation_set, args.batch_size,
train=False,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1)
print_outcome(dev_outcome,
model.objectives,
epoch=epoch,
step=global_step,
name="validation",
logger=test_writer)
if args.save_dir is not None:
model.save(session, args.save_dir)
return dev_outcome
def main():
args = parse_args()
config = Config.load(args.config)
validation_set = config.load_dataset("dev", merge=False)
session_conf = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=session_conf) as session, tf.device(args.device):
if args.load_dir is not None:
model = SequenceModel.load(session, args.load_dir,
args=args, rebuild_graph=args.rebuild_graph, faux_cudnn=args.faux_cudnn,
replace_to=args.name,
replace_from=args.old_name)
dev_outcome = accuracy(
model, session, validation_set, args.batch_size, train=False,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1)
print_outcome(dev_outcome,
model.objectives, 0,
name="loaded validation",
step=session.run(model.global_step),
logger=None)
# dev_outcome = None
if args.rebuild_graph and args.save_dir is not None:
model.save(session, args.save_dir)
train_set = config.load_dataset("train")
else:
# load classes and index2word from a file.
dev_outcome = None
train_set = config.load_dataset("train")
model = SequenceModel(
objectives=get_objectives(config.objectives, train_set),
features=config.features,
feature_index2words=get_feature_vocabs(config.features, train_set, ["<UNK>"]),
lr=args.lr,
anneal_rate=args.anneal_rate,
weight_noise=args.weight_noise,
freeze_rate=args.freeze_rate,
freeze_rate_anneal=args.freeze_rate_anneal,
clip_norm=args.clip_norm,
hidden_sizes=args.hidden_sizes,
solver=args.solver,
fused=args.fused,
class_weights_normalize=args.class_weights_normalize,
class_weights=args.class_weights,
class_weights_clipval=args.class_weights_clipval,
keep_prob=args.keep_prob,
input_keep_prob=args.input_keep_prob,
name=args.name,
cudnn=args.cudnn,
faux_cudnn=args.faux_cudnn,
create_variables=True)
session.run(tf.global_variables_initializer())
if args.restore_input_features is not None:
restore_session(
session, args.restore_input_features,
verbose=True,
use_metagraph=False,
only_features=True)
print("Model has {} trainable parameters.".format(count_number_of_parameters()), flush=True)
best_dev_score = 0.0
patience = 0
best_epoch = 0
best_outcome = None
improvement_key = args.improvement_key
if dev_outcome is not None:
best_dev_score = dev_outcome[improvement_key]
best_epoch = -1
best_outcome = dev_outcome
if args.save_dir is not None:
train_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "train")))
test_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "test")))
else:
train_writer, test_writer = None, None
test_callback = TestCallback(model,
session,
validation_set,
-1,
args,
logger=test_writer)
if len(train_set) > 0:
train_set.set_randomize(True)
train_set.set_rng(model.rng)
for epoch in range(args.max_epochs):
dev_outcome = compute_epoch(
session, model,
train_set=train_set, validation_set=validation_set,
epoch=epoch, test_callback=test_callback,
train_writer=train_writer,
test_writer=test_writer,
args=args)
if dev_outcome[improvement_key] > best_dev_score:
best_dev_score = dev_outcome[improvement_key]
best_epoch = epoch
best_outcome = dev_outcome
patience = 0
if args.save_dir is not None:
model.save(session, join(args.save_dir, "best"))
else:
patience += 1
if patience >= args.max_patience:
print("No improvements for {} epochs. Stopping.".format(args.max_patience))
break
del dev_outcome
print_outcome(
best_outcome,
model.objectives,
epoch=best_epoch,
name="validation-best",
step=session.run(model.global_step),
logger=None)
if __name__ == "__main__":
main()
|
import numpy as np
import subprocess
import h5py
import ciseau
from os.path import exists, splitext, join
from wikidata_linker_utils.wikidata_ids import load_wikidata_ids
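# The readers below assume CoNLL-style TSV input: one token per line with
# tab-separated label columns, and examples separated by blank lines (or by
# lines starting with the configured comment marker).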
def count_examples(lines, comment, ignore_value, column_indices):
example_length = 0
has_labels = False
found = 0
for line in lines:
if len(line) == 0 or (comment is not None and line.startswith(comment)):
if example_length > 0 and has_labels:
found += 1
example_length = 0
has_labels = False
else:
example_length += 1
if not has_labels:
cols = line.split("\t")
if len(cols) > 1:
if ignore_value is not None:
for col_index in column_indices:
if cols[col_index] != ignore_value:
has_labels = True
break
else:
has_labels = True
if example_length > 0 and has_labels:
found += 1
return found
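# `retokenize_example` re-tokenizes the original tokens with ciseau and, by
# walking cumulative character offsets, assigns to every new token the label
# of the original token that covers it.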
def retokenize_example(x, y):
tokens = ciseau.tokenize(" ".join(w for w in x),
normalize_ascii=False)
out_y = []
regular_cursor = 0
tokens_length_total = 0
regular_length_total = len(x[regular_cursor]) + 1 if len(x) > 0 else 0
if regular_cursor + 1 == len(x):
regular_length_total -= 1
for i in range(len(tokens)):
tokens_length_total = tokens_length_total + len(tokens[i])
while regular_length_total < tokens_length_total:
regular_cursor += 1
regular_length_total = regular_length_total + len(x[regular_cursor]) + 1
if regular_cursor + 1 == len(x):
regular_length_total -= 1
out_y.append(y[regular_cursor])
assert(regular_cursor + 1 == len(x)), "error with %r" % (x,)
return ([tok.rstrip() for tok in tokens], out_y)
def convert_lines_to_examples(lines, comment, ignore_value,
column_indices, x_column, empty_column,
retokenize=False):
examples = []
x = []
y = []
for line in lines:
if len(line) == 0 or (comment is not None and line.startswith(comment)):
if len(x) > 0:
if not all(row == empty_column for row in y):
examples.append((x, y))
x = []
y = []
else:
cols = line.split("\t")
x.append(cols[x_column])
if len(cols) == 1:
y.append(empty_column)
else:
if ignore_value is not None:
y.append(
tuple(
cols[col_index] if col_index is not None and cols[col_index] != ignore_value else None
for col_index in column_indices
)
)
else:
y.append(
tuple(
cols[col_index] if col_index is not None else None
for col_index in column_indices
)
)
if len(x) > 0 and not all(row == empty_column for row in y):
examples.append((x, y))
if retokenize:
examples = [retokenize_example(x, y) for x, y in examples]
return examples
def load_tsv(path, x_column, y_columns, objective_names, comment, ignore_value,
retokenize):
""""
Deprecated method for loading a tsv file as a training/test set for a model.
Arguments:
----------
path: str, location of tsv file
x_column: int
y_columns: list<dict>, objectives in this file along with their column.
(e.g. `y_columns=[{"objective": "POS", "column": 2}, ...])`)
objective_names: name of all desired columns
comment: line beginning indicating it's okay to skip
ignore_value: label value that should be treated as missing
retokenize: run tokenizer again.
Returns
-------
list<tuple> : examples loaded into memory
Note: can use a lot of memory since entire file is loaded.
"""
objective2column = {col['objective']: col['column'] for col in y_columns}
column_indices = [objective2column.get(name, None) for name in objective_names]
empty_column = tuple(None for _ in objective_names)
if all(col_index is None for col_index in column_indices):
return []
with open(path, "rt") as fin:
lines = fin.read().splitlines()
return convert_lines_to_examples(lines,
ignore_value=ignore_value,
empty_column=empty_column,
x_column=x_column,
column_indices=column_indices,
comment=comment,
retokenize=retokenize)
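# Example load_tsv call (hypothetical file and columns; sketch only):
#
#   examples = load_tsv("train.tsv", x_column=0,
#                       y_columns=[{"objective": "POS", "column": 1}],
#                       objective_names=["POS"], comment="#",
#                       ignore_value="-", retokenize=False)
#   # each example is (tokens, labels) with one label tuple per token.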
class RandomizableDataset(object):
def set_rng(self, rng):
self.rng = rng
def set_randomize(self, randomize):
self.randomize = randomize
def set_ignore_y(self, ignore):
self.ignore_y = ignore
class TSVDataset(RandomizableDataset):
_fhandle = None
_fhandle_position = 0
_examples = None
_example_indices = None
_example_index = 0
_eof = False
ignore_y = False
def __init__(self, path, x_column, y_columns, objective_names, comment, ignore_value,
retokenize=False, chunksize=50000000, randomize=False, rng=None):
""""
Arguments:
----------
path: str, location of tsv file
x_column: int
y_columns: list<dict>, objectives in this file along with their column.
(e.g. `y_columns=[{"objective": "POS", "column": 2}, ...])`)
objective_names: name of all desired columns
comment: line beginning indicating it's okay to skip
ignore_value: label value that should be treated as missing
chunksize: how many bytes to read from the file at a time.
rng: numpy RandomState
retokenize: run tokenizer on x again.
"""
self.path = path
self.randomize = randomize
self.x_column = x_column
self.y_columns = y_columns
self.objective_names = objective_names
self.comment = comment
self.ignore_value = ignore_value
self.retokenize = retokenize
self.chunksize = chunksize
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
# column picking setup:
objective2column = {col['objective']: col['column'] for col in y_columns}
self.column_indices = [objective2column.get(name, None) for name in objective_names]
self.empty_column = tuple(None for _ in objective_names)
if all(col_index is None for col_index in self.column_indices):
self.length = 0
else:
self._compute_length()
def _signature(self):
try:
file_sha1sum = subprocess.check_output(
["sha1sum", self.path], universal_newlines=True
).split(" ")[0]
except FileNotFoundError:
file_sha1sum = subprocess.check_output(
["shasum", self.path], universal_newlines=True
).split(" ")[0]
sorted_cols = list(
map(
str,
sorted(
[col for col in self.column_indices if col is not None]
)
)
)
return "-".join([file_sha1sum] + sorted_cols)
def _compute_length(self):
length_file = (
splitext(self.path)[0] +
"-length-" +
self._signature() + ".txt"
)
if exists(length_file):
with open(length_file, "rt") as fin:
total = int(fin.read())
else:
total = 0
while True:
total += self._count_examples()
if self._eof:
break
with open(length_file, "wt") as fout:
fout.write(str(total) + "\n")
self.length = total
def __len__(self):
return self.length
def close(self):
if self._fhandle is not None:
self._fhandle.close()
self._fhandle = None
self._fhandle_position = 0
self._eof = False
self._examples = None
self._example_indices = None
def __del__(self):
self.close()
def _read_file_until_newline(self):
if self._fhandle is None:
self._fhandle = open(self.path, "rb")
if self._eof:
self._fhandle_position = 0
self._fhandle.seek(0)
self._eof = False
read_chunk = None
while True:
new_read_chunk = self._fhandle.read(self.chunksize)
if read_chunk is None:
read_chunk = new_read_chunk
else:
read_chunk += new_read_chunk
if len(new_read_chunk) < self.chunksize:
del new_read_chunk
self._fhandle_position += len(read_chunk)
self._eof = True
break
else:
del new_read_chunk
newline_pos = read_chunk.rfind(b"\n\n")
if newline_pos != -1:
# move to last line end position (so that we don't get
# half an example.)
self._fhandle.seek(self._fhandle_position + newline_pos + 2)
self._fhandle_position += newline_pos + 2
read_chunk = read_chunk[:newline_pos]
break
return read_chunk
def _count_examples(self):
read_chunk = self._read_file_until_newline()
return count_examples(
read_chunk.decode("utf-8").splitlines(),
ignore_value=self.ignore_value,
column_indices=self.column_indices,
comment=self.comment
)
def _load_examples(self):
read_chunk = self._read_file_until_newline()
if self._examples is not None:
del self._examples
self._examples = convert_lines_to_examples(
read_chunk.decode("utf-8").splitlines(),
ignore_value=self.ignore_value,
empty_column=self.empty_column,
x_column=self.x_column,
column_indices=self.column_indices,
comment=self.comment,
retokenize=self.retokenize
)
self._example_indices = np.arange(len(self._examples))
if self.randomize:
# access loaded data randomly:
self.rng.shuffle(self._example_indices)
self._example_index = 0
def __getitem__(self, index):
"""Retrieve the next example (index is ignored)"""
if index >= self.length:
raise StopIteration()
if self._example_indices is None or self._example_index == len(self._example_indices):
self._load_examples()
while len(self._examples) == 0:
self._load_examples()
if len(self._examples) > 0:
break
if self._eof:
raise StopIteration()
ex = self._examples[self._example_indices[self._example_index]]
self._example_index += 1
return ex
def set_randomize(self, randomize):
if randomize != self.randomize:
self.randomize = randomize
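# Example TSVDataset usage (hypothetical path/columns; sketch only). Unlike
# load_tsv, the file is streamed chunk by chunk rather than held in memory:
#
#   dataset = TSVDataset("train.tsv", x_column=0,
#                        y_columns=[{"objective": "POS", "column": 1}],
#                        objective_names=["POS"], comment="#",
#                        ignore_value="-")
#   tokens, labels = dataset[0]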
class OracleClassification(object):
def __init__(self, classes, classification, path):
self.classes = classes
self.classification = classification
self.path = path
self.contains_other = self.classes[-1] == "other"
def classify(self, index):
return self.classification[index]
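# `load_oracle_classification` expects `path` to contain "classes.txt" (one
# class name per line) and "classification.npy" (a class index per wikidata
# entity), and wraps them in an OracleClassification.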
def load_oracle_classification(path):
with open(join(path, "classes.txt"), "rt", encoding="UTF-8") as fin:
classes = fin.read().splitlines()
classification = np.load(join(path, "classification.npy"))
return OracleClassification(classes, classification, path)
class ClassificationHandler(object):
def __init__(self, wikidata_path, classification_path):
self.classification_path = classification_path
_, self.name2index = load_wikidata_ids(wikidata_path, verbose=False)
self.classifiers = {}
def get_classifier(self, name):
if name not in self.classifiers:
self.classifiers[name] = load_oracle_classification(
join(self.classification_path, name)
)
return self.classifiers[name]
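# H5Dataset streams examples one HDF5 chunk at a time; when `randomize` is on
# it shuffles the order of chunks and the order of examples inside the loaded
# chunk, approximating a global shuffle without reading the whole file.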
class H5Dataset(RandomizableDataset):
handle_open = False
ignore_y = False
_max_generated_example = 0
_min_generated_example = 0
def __init__(self, path, x_column, y_columns, objective_names,
classifications, ignore_value, randomize=False, rng=None):
self.x_column = str(x_column)
self.y_columns = y_columns
self.ignore_value = ignore_value
self.objective_names = objective_names
self.randomize = randomize
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
self._classifications = classifications
self.handle = h5py.File(path, "r")
self.path = path
self.handle_open = True
self.length = len(self.handle[self.x_column])
self.chunksize = self.handle[self.x_column].chunks[0]
self._example_indices = None
objective2column = {
col['objective']: (
str(col['column']),
self._classifications.get_classifier(col['classification'])
) for col in y_columns
}
if self.ignore_value is not None:
for _, classifier in objective2column.values():
if self.ignore_value in classifier.classes:
classifier.classes[classifier.classes.index(self.ignore_value)] = None
self.column2col_indices = {}
for col_idx, name in enumerate(self.objective_names):
if name not in objective2column:
continue
column, classifier = objective2column[name]
if column not in self.column2col_indices:
self.column2col_indices[column] = [(classifier, col_idx)]
else:
self.column2col_indices[column].append((classifier, col_idx))
def close(self):
if self.handle_open:
self.handle.close()
self.handle_open = False
def __del__(self):
self.close()
def __len__(self):
return self.length
def _build_examples(self, index):
x = [x_chunk.split("\n") for x_chunk in self.handle[self.x_column][index:index + self.chunksize]]
y = [[[None for k in range(len(self.objective_names))] for j in range(len(x[i]))] for i in range(len(x))]
if not self.ignore_y:
for handle_column, col_content in self.column2col_indices.items():
col_ids = [[self._classifications.name2index[name] if name != "" else None
for name in y_chunk.split("\n")]
for y_chunk in self.handle[handle_column][index:index + self.chunksize]]
for i in range(len(col_ids)):
for j, idx in enumerate(col_ids[i]):
if idx is not None:
for classifier, k in col_content:
y[i][j][k] = classifier.classify(idx)
return x, y
def set_randomize(self, randomize):
if self.randomize != randomize:
self.randomize = randomize
if self._max_generated_example != self._min_generated_example:
self.xorder = np.arange(self._min_generated_example, self._max_generated_example)
self.rng.shuffle(self.xorder)
def __getitem__(self, index):
if index >= len(self):
raise StopIteration()
if self.randomize:
if self._example_indices is None or index == 0:
self._example_indices = np.arange(0, len(self), self.chunksize)
self.rng.shuffle(self._example_indices)
# transformed index:
index = (self._example_indices[index // self.chunksize] + (index % self.chunksize)) % len(self)
if index < self._min_generated_example or index >= self._max_generated_example:
self.x, self.y = self._build_examples(index)
# store bounds of generated data:
self._min_generated_example = index
self._max_generated_example = index + len(self.x)
if self.randomize:
self.xorder = np.arange(self._min_generated_example, self._max_generated_example)
self.rng.shuffle(self.xorder)
if self.randomize:
index = self.xorder[index - self._min_generated_example]
return self.x[index - self._min_generated_example], self.y[index - self._min_generated_example]
class CombinedDataset(object):
_which_dataset = None
_dataset_counters = None
def set_rng(self, rng):
self.rng = rng
for dataset in self.datasets:
dataset.rng = rng
def set_randomize(self, randomize):
self.randomize = randomize
for dataset in self.datasets:
dataset.set_randomize(randomize)
def set_ignore_y(self, ignore):
for dataset in self.datasets:
dataset.set_ignore_y(ignore)
def close(self):
for dataset in self.datasets:
dataset.close()
def _build_which_dataset(self):
self._which_dataset = np.empty(self.length, dtype=np.int16)
self._dataset_counters = np.zeros(len(self.datasets), dtype=np.int64)
offset = 0
for index, dataset in enumerate(self.datasets):
# ensure each dataset is seen as much as its content
# says:
self._which_dataset[offset:offset + len(dataset)] = index
offset += len(dataset)
def __getitem__(self, index):
if index == 0:
if self.randomize:
# visit datasets in random orders:
self.rng.shuffle(self._which_dataset)
self._dataset_counters[:] = 0
which = self._which_dataset[index]
idx = self._dataset_counters[which]
self._dataset_counters[which] += 1
return self.datasets[which][idx]
def __init__(self, datasets, rng=None, randomize=False):
self.datasets = datasets
if rng is None:
rng = np.random.RandomState(0)
self.set_rng(rng)
self.set_randomize(randomize)
self.length = sum(len(dataset) for dataset in datasets)
self._build_which_dataset()
def __len__(self):
return self.length
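# CombinedDataset exposes several datasets as a single indexable collection:
# every global index is routed to one of the underlying datasets in proportion
# to their lengths, and when randomized the routing is reshuffled at the start
# of each pass (i.e. whenever index 0 is requested).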
|
import queue
import threading
def prefetch_generator(generator, to_fetch=10):
    """Consume `generator` on a background thread, buffering up to `to_fetch` items."""
    q = queue.Queue(maxsize=to_fetch)
    def thread_worker(out_queue, gen):
        # `out_queue` avoids shadowing the `queue` module imported above.
        try:
            for val in gen:
                out_queue.put(val)
        finally:
            # always signal completion so the consumer cannot block forever,
            # even if the wrapped generator raises inside the worker thread.
            out_queue.put(None)
    t = threading.Thread(target=thread_worker, args=(q, generator))
    some_exception = None
    try:
        t.start()
        while True:
            job = q.get()
            if job is None:
                break
            yield job
            del job
    except Exception as e:
        some_exception = e
    finally:
        if some_exception is not None:
            raise some_exception
        t.join()
        del t
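# Typical use (illustrative names; `batches` and `train_op` stand in for the
# caller's objects): wrap a batch iterator so the next feed_dict is prepared
# on a background thread while the current one is consumed, e.g.
#
#   for feed_dict in prefetch_generator(batches, to_fetch=4):
#       session.run(train_op, feed_dict)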
|