# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hashlib
from pathlib import Path
from access.preprocess import get_parallel_file_pair_preprocessor
from access.preprocessors import dump_preprocessors, load_preprocessors
from access.resources.paths import PHASES, get_dataset_dir, get_data_filepath, get_filepaths_dict
from access.utils.helpers import count_lines, read_lines, create_directory_or_skip
def yield_indexes_of_lines(filepath, lines):
lines = set(lines)
with Path(filepath).open('r') as f:
for idx, line in enumerate(f):
if line.strip('\n') in lines:
yield idx
def sort_files_by_line_count(filepaths):
return sorted(filepaths, key=lambda filepath: count_lines(filepath))
def has_lines_in_common(filepath1, filepath2):
[smallest_filepath, largest_filepath] = sort_files_by_line_count([filepath1, filepath2])
for idx in yield_indexes_of_lines(largest_filepath, read_lines(smallest_filepath)):
return True
return False
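# Illustrative sketch (commented, not executed; the file paths are hypothetical): the check streams
# the larger file once and stops at the first shared line, which keeps it cheap on big corpora.
#   >>> Path('/tmp/first.txt').write_text('a\nb\nc\n')
#   >>> Path('/tmp/second.txt').write_text('x\nb\n')
#   >>> has_lines_in_common('/tmp/first.txt', '/tmp/second.txt')
#   True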
def get_preprocessed_dataset_name(dataset, preprocessor):
return '_' + hashlib.md5((dataset + preprocessor.get_hash()).encode()).hexdigest()
def create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs):
new_dataset = get_preprocessed_dataset_name(dataset, preprocessor)
with create_directory_or_skip(get_dataset_dir(new_dataset)):
print(f'Creating preprocessed dataset with {preprocessor}: {dataset} -> {new_dataset}')
new_dataset_dir = get_dataset_dir(new_dataset)
filepaths_dict = get_filepaths_dict(dataset)
new_filepaths_dict = get_filepaths_dict(new_dataset)
for phase in PHASES:
            if not filepaths_dict[phase, 'complex'].exists() or not filepaths_dict[phase, 'simple'].exists():
continue
parallel_file_pair_preprocessor = get_parallel_file_pair_preprocessor(
preprocessor.encode_file_pair,
n_jobs=n_jobs,
)
parallel_file_pair_preprocessor(filepaths_dict[phase, 'complex'], filepaths_dict[phase, 'simple'],
new_filepaths_dict[phase, 'complex'], new_filepaths_dict[phase, 'simple'])
previous_preprocessors = load_preprocessors(get_dataset_dir(dataset))
if previous_preprocessors is not None:
preprocessors = previous_preprocessors + [preprocessor]
else:
preprocessors = [preprocessor]
dump_preprocessors(preprocessors, new_dataset_dir)
with open(new_dataset_dir / 'original_dataset', 'w') as f:
f.write(dataset + '\n')
return new_dataset
def create_preprocessed_dataset(dataset, preprocessors, n_jobs=1):
for preprocessor in preprocessors:
# Fit preprocessor on input dataset
preprocessor.fit(get_data_filepath(dataset, 'train', 'complex'), get_data_filepath(dataset, 'train', 'simple'))
dataset = create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs)
return dataset
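# Illustrative sketch (commented, not executed; the preprocessor configuration is only an example):
# each preprocessor is fitted on the train split, then a new hash-named dataset directory is created,
# so chaining N preprocessors yields N cached intermediate datasets.
#   >>> from access.preprocessors import get_preprocessors
#   >>> preprocessors = get_preprocessors({'LengthRatioPreprocessor': {'target_ratio': 0.8}})
#   >>> create_preprocessed_dataset('wikilarge', preprocessors, n_jobs=4)
#   '_3fa2...'  # hash-based name from get_preprocessed_dataset_name (value hypothetical)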
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import bz2
import gzip
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile
import time
from urllib.request import urlretrieve
import zipfile
import git
from tqdm import tqdm
def reporthook(count, block_size, total_size):
# Download progress bar
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size_mb = count * block_size / (1024 * 1024)
speed = progress_size_mb / duration
percent = int(count * block_size * 100 / total_size)
msg = f'\r... {percent}% - {int(progress_size_mb)} MB - {speed:.2f} MB/s - {int(duration)}s'
sys.stdout.write(msg)
def download(url, destination_path):
print('Downloading...')
try:
urlretrieve(url, destination_path, reporthook)
sys.stdout.write('\n')
except (Exception, KeyboardInterrupt, SystemExit):
print('Rolling back: remove partially downloaded file')
os.remove(destination_path)
raise
def download_and_extract(url):
tmp_dir = Path(tempfile.mkdtemp())
compressed_filename = url.split('/')[-1]
compressed_filepath = tmp_dir / compressed_filename
download(url, compressed_filepath)
print('Extracting...')
return extract(compressed_filepath, tmp_dir)
def extract(filepath, output_dir):
# Infer extract method based on extension
extensions_to_methods = {
'.tar.gz': untar,
'.tar.bz2': untar,
'.tgz': untar,
'.zip': unzip,
'.gz': ungzip,
'.bz2': unbz2,
}
def get_extension(filename, extensions):
possible_extensions = [ext for ext in extensions if filename.endswith(ext)]
if len(possible_extensions) == 0:
raise Exception(f'File {filename} has an unknown extension')
# Take the longest (.tar.gz should take precedence over .gz)
return max(possible_extensions, key=lambda ext: len(ext))
filename = os.path.basename(filepath)
extension = get_extension(filename, list(extensions_to_methods))
extract_method = extensions_to_methods[extension]
    # Extract files in a temporary dir then move the extracted item back to
    # the output dir in order to get the details of what was extracted
tmp_extract_dir = tempfile.mkdtemp()
# Extract
extract_method(filepath, output_dir=tmp_extract_dir)
extracted_items = os.listdir(tmp_extract_dir)
output_paths = []
for name in extracted_items:
extracted_path = os.path.join(tmp_extract_dir, name)
output_path = os.path.join(output_dir, name)
move_with_overwrite(extracted_path, output_path)
output_paths.append(output_path)
return output_paths
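# Worked example of the extension dispatch above (commented sketch, paths hypothetical):
# for 'data.tar.gz' both '.tar.gz' and '.gz' match, the longest extension wins,
# and the archive is routed to untar rather than ungzip.
#   extract('/tmp/data.tar.gz', '/tmp/out')        # -> untar
#   extract('/tmp/cc.en.300.vec.gz', '/tmp/out')   # -> ungzip, writes /tmp/out/cc.en.300.vec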
def move_with_overwrite(source_path, target_path):
if os.path.isfile(target_path):
os.remove(target_path)
if os.path.isdir(target_path) and os.path.isdir(source_path):
shutil.rmtree(target_path)
shutil.move(source_path, target_path)
def untar(compressed_path, output_dir):
with tarfile.open(compressed_path) as f:
f.extractall(output_dir)
def unzip(compressed_path, output_dir):
with zipfile.ZipFile(compressed_path, 'r') as f:
f.extractall(output_dir)
def ungzip(compressed_path, output_dir):
filename = os.path.basename(compressed_path)
assert filename.endswith('.gz')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, filename[:-3])
with gzip.open(compressed_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def unbz2(compressed_path, output_dir):
extract_filename = os.path.basename(compressed_path).replace('.bz2', '')
extract_path = os.path.join(output_dir, extract_filename)
with bz2.BZ2File(compressed_path, 'rb') as compressed_file, open(extract_path, 'wb') as extract_file:
for data in tqdm(iter(lambda: compressed_file.read(1024 * 1024), b'')):
extract_file.write(data)
def add_newline_at_end_of_file(file_path):
with open(file_path, 'r') as f:
last_character = f.readlines()[-1][-1]
if last_character == '\n':
return
print(f'Adding newline at the end of {file_path}')
with open(file_path, 'a') as f:
f.write('\n')
def git_clone(url, output_dir, overwrite=True):
    if Path(output_dir).exists():
        if not overwrite:
            return
        shutil.rmtree(output_dir)
    git.Repo.clone_from(url, output_dir)
def replace_lrb_rrb_file(filepath):
tmp_filepath = filepath + '.tmp'
with open(filepath, 'r') as input_file, open(tmp_filepath, 'w') as output_file:
for line in input_file:
output_file.write(line.replace('-lrb-', '(').replace('-rrb-', ')'))
os.rename(tmp_filepath, filepath)
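# Commented sketch of the in-place rewrite performed by replace_lrb_rrb_file (path hypothetical):
# a line such as
#   'the film -lrb- 1999 -rrb- was remade'
# becomes
#   'the film ( 1999 ) was remade'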
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from glob import glob
import os
from pathlib import Path
import shutil
import tempfile
import numpy as np
from access.text import word_tokenize
from access.utils.helpers import (yield_lines_in_parallel, write_lines_in_parallel, create_directory_or_skip,
lock_directory)
from access.preprocess import replace_lrb_rrb, replace_lrb_rrb_file, normalize_quotes
from access.resources.utils import download_and_extract, add_newline_at_end_of_file, git_clone
from access.resources.paths import (FASTTEXT_EMBEDDINGS_PATH, get_dataset_dir, get_data_filepath, PHASES, MODELS_DIR,
BEST_MODEL_DIR)
def prepare_wikilarge():
dataset = 'wikilarge'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/louismartin/dress-data/raw/master/data-simplification.tar.bz2'
extracted_path = download_and_extract(url)[0]
# Only rename files and put them in local directory architecture
for phase in PHASES:
for (old_language_name, new_language_name) in [('src', 'complex'), ('dst', 'simple')]:
old_path_glob = os.path.join(extracted_path, dataset, f'*.ori.{phase}.{old_language_name}')
globs = glob(old_path_glob)
assert len(globs) == 1
old_path = globs[0]
new_path = get_data_filepath(dataset, phase, new_language_name)
shutil.copyfile(old_path, new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
add_newline_at_end_of_file(new_path)
return dataset
def prepare_turkcorpus_lower():
dataset = 'turkcorpus_lower'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print(output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_lower_dir = output_dir / 'data/turkcorpus'
print(turkcorpus_lower_dir)
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
for (old_language_name, new_language_name) in [('norm', 'complex'), ('simp', 'simple')]:
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.{old_language_name}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, new_language_name)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
for i in range(8):
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.turk.{i}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i=i)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
print('Done.')
return dataset
def prepare_turkcorpus():
dataset = 'turkcorpus'
with create_directory_or_skip(get_dataset_dir(dataset)):
# Import here to avoid circular imports
from access.feature_extraction import get_levenshtein_similarity
prepare_turkcorpus_lower()
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_truecased_dir = output_dir / 'data/turkcorpus/truecased'
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
            # (1) Read the .tsv file, where each line is tab-separated:
            #     `idx, complex_sentence, *turk_sentences = line.split('\t')`
            # (2) Replace -lrb- / -rrb- tokens and tokenize
            # (3) Turk sentences are shuffled for each sample, so we need to realign them with turkcorpus_lower
tsv_filepath = turkcorpus_truecased_dir / f'{old_phase}.8turkers.organized.tsv'
output_complex_filepath = get_data_filepath(dataset, new_phase, 'complex')
output_ref_filepaths = [get_data_filepath(dataset, new_phase, 'simple.turk', i) for i in range(8)]
# These files will be used to reorder the shuffled ref sentences
ordered_ref_filepaths = [
get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i) for i in range(8)
]
with write_lines_in_parallel([output_complex_filepath] + output_ref_filepaths) as files:
input_filepaths = [tsv_filepath] + ordered_ref_filepaths
for tsv_line, *ordered_ref_sentences in yield_lines_in_parallel(input_filepaths):
sample_id, complex_sentence, *shuffled_ref_sentences = [
word_tokenize(normalize_quotes(replace_lrb_rrb(s))) for s in tsv_line.split('\t')
]
reordered_sentences = []
for ordered_ref_sentence in ordered_ref_sentences:
# Find the position of the ref_sentence in the shuffled sentences
similarities = [
get_levenshtein_similarity(ordered_ref_sentence.replace(' ', ''),
shuffled_ref_sentence.lower().replace(' ', ''))
for shuffled_ref_sentence in shuffled_ref_sentences
]
idx = np.argmax(similarities)
# A few sentences have differing punctuation marks
assert similarities[idx] > 0.98, \
f'{ordered_ref_sentence} != {shuffled_ref_sentences[idx].lower()} {similarities[idx]:.2f}'
reordered_sentences.append(shuffled_ref_sentences.pop(idx))
assert len(shuffled_ref_sentences) == 0
assert len(reordered_sentences) == 8
files.write([complex_sentence] + reordered_sentences)
return dataset
def prepare_fasttext_embeddings():
FASTTEXT_EMBEDDINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
with lock_directory(FASTTEXT_EMBEDDINGS_PATH.parent):
if FASTTEXT_EMBEDDINGS_PATH.exists():
return
url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, FASTTEXT_EMBEDDINGS_PATH)
def prepare_models():
MODELS_DIR.mkdir(parents=True, exist_ok=True)
if not BEST_MODEL_DIR.exists():
url = 'http://dl.fbaipublicfiles.com/access/best_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, BEST_MODEL_DIR)
all_parameters_model_dir = MODELS_DIR / 'all_parameters_model'
if not all_parameters_model_dir.exists():
url = 'http://dl.fbaipublicfiles.com/access/all_parameters_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, all_parameters_model_dir)
return BEST_MODEL_DIR
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
from functools import lru_cache
import shutil
from nevergrad.instrumentation import Instrumentation
from nevergrad.optimization import optimizerlib
import re
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.evaluation.utils import combine_metrics
from access.fairseq.base import (fairseq_preprocess, fairseq_train, fairseq_generate, get_fairseq_exp_dir,
)
from access.resources.datasets import has_lines_in_common
from access.preprocessors import get_preprocessors, get_preprocessor_by_name
from access.resources.datasets import create_preprocessed_dataset
from access.resources.paths import get_data_filepath, get_dataset_dir
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.utils.training import (print_method_name, print_args, print_result, print_running_time,
)
from access.utils.helpers import get_allowed_kwargs
def check_dataset(dataset):
# Sanity check with evaluation dataset
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'valid', 'complex'))
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'test', 'complex'))
def prepare_exp_dir():
exp_dir = get_fairseq_exp_dir()
if exp_dir.exists():
# Remove exp dir to prevent conflicts with requeue and non deterministic args
# https://github.com/fairinternal/dfoptim/issues/126 #private
shutil.rmtree(exp_dir)
exp_dir.mkdir(parents=True)
return exp_dir
def get_simplifier(exp_dir, preprocessors_kwargs, generate_kwargs):
# TODO: Take kwargs as input and separate between get_preprocessors kwargs and generate_kwargs
preprocessors = get_preprocessors(preprocessors_kwargs)
simplifier = get_fairseq_simplifier(exp_dir, **generate_kwargs)
return get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
def find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs, parametrization_budget=64):
@lru_cache()
def evaluate_parametrization(**instru_kwargs):
        # Note that we use the default generate kwargs instead of the provided ones because they are faster
preprocessors_kwargs = instru_kwargs_to_preprocessors_kwargs(instru_kwargs)
simplifier = get_simplifier(exp_dir, preprocessors_kwargs=preprocessors_kwargs, generate_kwargs={})
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
return combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
def preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs):
instru_kwargs = {}
for preprocessor_name, preprocessor_kwargs in preprocessors_kwargs.items():
assert '_' not in preprocessor_name
preprocessor = get_preprocessor_by_name(preprocessor_name)(**preprocessor_kwargs)
# First we set the values from preprocessors_kwargs which are constant
for kwarg_name, kwarg_value in preprocessor_kwargs.items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
# Then we overwrite some of these values with nevergrad variables when necessary
for kwarg_name, kwarg_value in preprocessor.get_nevergrad_variables().items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
return instru_kwargs
def instru_kwargs_to_preprocessors_kwargs(instru_kwargs):
preprocessors_kwargs = defaultdict(dict)
for key, value in instru_kwargs.items():
preprocessor_name, kwarg_name = re.match(r'([a-zA-Z0-9]+)_([a-z0-9_]+)', key).groups()
preprocessors_kwargs[preprocessor_name][kwarg_name] = value
return dict(preprocessors_kwargs)
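    # Worked example of the two converters above (values hypothetical): the nested dict
    #   {'LengthRatioPreprocessor': {'target_ratio': 0.8}}
    # is flattened to the nevergrad-friendly form
    #   {'LengthRatioPreprocessor_target_ratio': 0.8}
    # and mapped back by the regex, which splits on the underscore after the CamelCase
    # preprocessor name (hence the assert that preprocessor names contain no underscore).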
instru_kwargs = preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs)
instru = Instrumentation(**instru_kwargs)
if instru.dimension == 0:
return preprocessors_kwargs
    # No need to search a lot when there are only a few parameters
parametrization_budget = min(32**instru.dimension, parametrization_budget)
optimizer = optimizerlib.ScrHammersleySearch(instrumentation=instru, budget=parametrization_budget, num_workers=1)
recommendation = optimizer.optimize(evaluate_parametrization, verbosity=0)
return instru_kwargs_to_preprocessors_kwargs(recommendation.kwargs)
def check_and_resolve_args(kwargs):
if kwargs.get('diverse_beam_groups_ratio', None) is not None:
diverse_beam_groups = max(int(kwargs['beam'] * kwargs['diverse_beam_groups_ratio']), 1)
print(f'diverse_beam_groups={diverse_beam_groups}')
assert kwargs['beam'] % diverse_beam_groups == 0
kwargs['diverse_beam_groups'] = diverse_beam_groups
return kwargs
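# Worked example of the resolution above (values hypothetical): with beam=8 and
# diverse_beam_groups_ratio=0.25, diverse_beam_groups = max(int(8 * 0.25), 1) = 2,
# the assert holds because 8 % 2 == 0, and kwargs then carries diverse_beam_groups=2
# down to fairseq_generate.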
@print_method_name
@print_args
@print_result
@print_running_time
def fairseq_train_and_evaluate(dataset, metrics_coefs=[1, 1, 1], parametrization_budget=64, **kwargs):
check_dataset(dataset)
kwargs = check_and_resolve_args(kwargs)
exp_dir = prepare_exp_dir()
preprocessors_kwargs = kwargs.get('preprocessors_kwargs', {})
preprocessors = get_preprocessors(preprocessors_kwargs)
if len(preprocessors) > 0:
dataset = create_preprocessed_dataset(dataset, preprocessors, n_jobs=1)
shutil.copy(get_dataset_dir(dataset) / 'preprocessors.pickle', exp_dir)
preprocessed_dir = fairseq_preprocess(dataset)
train_kwargs = get_allowed_kwargs(fairseq_train, preprocessed_dir, exp_dir, **kwargs)
fairseq_train(preprocessed_dir, exp_dir=exp_dir, **train_kwargs)
# Evaluation
generate_kwargs = get_allowed_kwargs(fairseq_generate, 'complex_filepath', 'pred_filepath', exp_dir, **kwargs)
recommended_preprocessors_kwargs = find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs,
parametrization_budget)
print(f'recommended_preprocessors_kwargs={recommended_preprocessors_kwargs}')
simplifier = get_simplifier(exp_dir, recommended_preprocessors_kwargs, generate_kwargs)
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
print(f'scores={scores}')
score = combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
return score
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
import os
from pathlib import Path
import random
import re
import shutil
import tempfile
import time
from fairseq import options
from fairseq_cli import preprocess, train, generate
from access.resources.paths import get_dataset_dir, EXP_DIR
from access.utils.helpers import (log_stdout, lock_directory, create_directory_or_skip, yield_lines,
write_lines)
def get_fairseq_exp_dir(job_id=None):
if job_id is not None:
dir_name = f'slurmjob_{job_id}'
else:
dir_name = f'local_{int(time.time() * 1000)}'
    return Path(EXP_DIR) / 'fairseq' / dir_name
def fairseq_preprocess(dataset):
dataset_dir = get_dataset_dir(dataset)
with lock_directory(dataset_dir):
preprocessed_dir = dataset_dir / 'fairseq_preprocessed'
with create_directory_or_skip(preprocessed_dir):
preprocessing_parser = options.get_preprocessing_parser()
preprocess_args = preprocessing_parser.parse_args([
'--source-lang',
'complex',
'--target-lang',
'simple',
'--trainpref',
os.path.join(dataset_dir, f'{dataset}.train'),
'--validpref',
os.path.join(dataset_dir, f'{dataset}.valid'),
'--testpref',
os.path.join(dataset_dir, f'{dataset}.test'),
'--destdir',
str(preprocessed_dir),
'--output-format',
'raw',
])
preprocess.main(preprocess_args)
return preprocessed_dir
def fairseq_train(
preprocessed_dir,
exp_dir,
ngpus=None,
max_tokens=2000,
arch='fconv_iwslt_de_en',
pretrained_emb_path=None,
embeddings_dim=None,
# Transformer (decoder is the same as encoder for now)
encoder_embed_dim=512,
encoder_layers=6,
encoder_attention_heads=8,
# encoder_decoder_dim_ratio=1,
# share_embeddings=True,
max_epoch=50,
warmup_updates=None,
lr=0.1,
min_lr=1e-9,
dropout=0.2,
label_smoothing=0.1,
lr_scheduler='fixed',
weight_decay=0.0001,
criterion='label_smoothed_cross_entropy',
optimizer='nag',
validations_before_sari_early_stopping=10,
fp16=False):
exp_dir = Path(exp_dir)
with log_stdout(exp_dir / 'fairseq_train.stdout'):
preprocessed_dir = Path(preprocessed_dir)
exp_dir.mkdir(exist_ok=True, parents=True)
# Copy dictionaries to exp_dir for generation
shutil.copy(preprocessed_dir / 'dict.complex.txt', exp_dir)
shutil.copy(preprocessed_dir / 'dict.simple.txt', exp_dir)
train_parser = options.get_training_parser()
# if share_embeddings:
# assert encoder_decoder_dim_ratio == 1
args = [
'--task',
'translation',
preprocessed_dir,
'--raw-text',
'--source-lang',
'complex',
'--target-lang',
'simple',
'--save-dir',
os.path.join(exp_dir, 'checkpoints'),
'--clip-norm',
0.1,
'--criterion',
criterion,
'--no-epoch-checkpoints',
'--save-interval-updates',
5000, # Validate every n updates
'--validations-before-sari-early-stopping',
validations_before_sari_early_stopping,
'--arch',
arch,
# '--decoder-out-embed-dim', int(embeddings_dim * encoder_decoder_dim_ratio), # Output dim of decoder
'--max-tokens',
max_tokens,
'--max-epoch',
max_epoch,
'--lr-scheduler',
lr_scheduler,
'--dropout',
dropout,
'--lr',
lr,
'--lr-shrink',
0.5, # For reduce lr on plateau scheduler
'--min-lr',
min_lr,
'--weight-decay',
weight_decay,
'--optimizer',
optimizer,
'--label-smoothing',
label_smoothing,
'--seed',
random.randint(1, 1000),
# '--force-anneal', '200',
# '--distributed-world-size', '1',
]
if arch == 'transformer':
args.extend([
'--encoder-embed-dim',
encoder_embed_dim,
'--encoder-ffn-embed-dim',
4 * encoder_embed_dim,
'--encoder-layers',
encoder_layers,
'--encoder-attention-heads',
encoder_attention_heads,
'--decoder-layers',
encoder_layers,
'--decoder-attention-heads',
encoder_attention_heads,
])
        if pretrained_emb_path is not None:
            args.extend(['--encoder-embed-path', pretrained_emb_path])
            args.extend(['--decoder-embed-path', pretrained_emb_path])
if embeddings_dim is not None:
args.extend(['--encoder-embed-dim', embeddings_dim]) # Input and output dim of encoder
args.extend(['--decoder-embed-dim', embeddings_dim]) # Input dim of decoder
if ngpus is not None:
args.extend(['--distributed-world-size', ngpus])
# if share_embeddings:
# args.append('--share-input-output-embed')
if fp16:
args.append('--fp16')
if warmup_updates is not None:
args.extend(['--warmup-updates', warmup_updates])
args = [str(arg) for arg in args]
train_args = options.parse_args_and_arch(train_parser, args)
train.main(train_args)
def _fairseq_generate(complex_filepath,
output_pred_filepath,
checkpoint_paths,
complex_dictionary_path,
simple_dictionary_path,
beam=5,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
# exp_dir must contain checkpoints/checkpoint_best.pt, and dict.{complex,simple}.txt
# First copy input complex file to exp_dir and create dummy simple file
tmp_dir = Path(tempfile.mkdtemp())
new_complex_filepath = tmp_dir / 'tmp.complex-simple.complex'
dummy_simple_filepath = tmp_dir / 'tmp.complex-simple.simple'
shutil.copy(complex_filepath, new_complex_filepath)
shutil.copy(complex_filepath, dummy_simple_filepath)
shutil.copy(complex_dictionary_path, tmp_dir / 'dict.complex.txt')
shutil.copy(simple_dictionary_path, tmp_dir / 'dict.simple.txt')
generate_parser = options.get_generation_parser()
args = [
tmp_dir,
'--path',
':'.join([str(path) for path in checkpoint_paths]),
'--beam',
beam,
'--nbest',
hypothesis_num,
'--lenpen',
lenpen,
'--diverse-beam-groups',
diverse_beam_groups if diverse_beam_groups is not None else -1,
'--diverse-beam-strength',
diverse_beam_strength,
'--batch-size',
batch_size,
'--raw-text',
'--print-alignment',
'--gen-subset',
'tmp',
# We don't want to reload pretrained embeddings
'--model-overrides',
{
'encoder_embed_path': None,
'decoder_embed_path': None
},
]
if sampling:
args.extend([
'--sampling',
'--sampling-topk',
10,
])
args = [str(arg) for arg in args]
generate_args = options.parse_args_and_arch(generate_parser, args)
out_filepath = tmp_dir / 'generation.out'
with log_stdout(out_filepath, mute_stdout=True):
# evaluate model in batch mode
generate.main(generate_args)
# Retrieve translations
def parse_all_hypotheses(out_filepath):
hypotheses_dict = defaultdict(list)
for line in yield_lines(out_filepath):
match = re.match(r'^H-(\d+)\t-?\d+\.\d+\t(.*)$', line)
if match:
sample_id, hypothesis = match.groups()
hypotheses_dict[int(sample_id)].append(hypothesis)
# Sort in original order
return [hypotheses_dict[i] for i in range(len(hypotheses_dict))]
all_hypotheses = parse_all_hypotheses(out_filepath)
predictions = [hypotheses[hypothesis_num - 1] for hypotheses in all_hypotheses]
write_lines(predictions, output_pred_filepath)
os.remove(dummy_simple_filepath)
os.remove(new_complex_filepath)
def fairseq_generate(complex_filepath,
output_pred_filepath,
exp_dir,
beam=1,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
exp_dir = Path(exp_dir)
checkpoint_path = exp_dir / 'checkpoints/checkpoint_best.pt'
assert checkpoint_path.exists(), f'Generation failed, no checkpoint at {checkpoint_path}'
complex_dictionary_path = exp_dir / 'dict.complex.txt'
simple_dictionary_path = exp_dir / 'dict.simple.txt'
_fairseq_generate(complex_filepath,
output_pred_filepath, [checkpoint_path],
complex_dictionary_path=complex_dictionary_path,
simple_dictionary_path=simple_dictionary_path,
beam=beam,
hypothesis_num=hypothesis_num,
lenpen=lenpen,
diverse_beam_groups=diverse_beam_groups,
diverse_beam_strength=diverse_beam_strength,
sampling=sampling,
batch_size=batch_size)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from contextlib import contextmanager, AbstractContextManager
from fcntl import flock, LOCK_EX, LOCK_UN
import inspect
import io
from itertools import zip_longest
from pathlib import Path
import shutil
import sys
import tempfile
import numpy as np
@contextmanager
def open_files(filepaths, mode='r'):
files = []
try:
files = [Path(filepath).open(mode) for filepath in filepaths]
yield files
finally:
[f.close() for f in files]
def yield_lines_in_parallel(filepaths, strip=True, strict=True, n_lines=float('inf')):
assert type(filepaths) == list
with open_files(filepaths) as files:
for i, parallel_lines in enumerate(zip_longest(*files)):
if i >= n_lines:
break
if None in parallel_lines:
assert not strict, f'Files don\'t have the same number of lines: {filepaths}, use strict=False'
if strip:
parallel_lines = [l.rstrip('\n') if l is not None else None for l in parallel_lines]
yield parallel_lines
class FilesWrapper:
'''Write to multiple open files at the same time'''
def __init__(self, files, strict=True):
self.files = files
self.strict = strict # Whether to raise an exception when a line is None
def write(self, lines):
assert len(lines) == len(self.files)
for line, f in zip(lines, self.files):
if line is None:
assert not self.strict
continue
f.write(line.rstrip('\n') + '\n')
@contextmanager
def write_lines_in_parallel(filepaths, strict=True):
with open_files(filepaths, 'w') as files:
yield FilesWrapper(files, strict=strict)
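# Illustrative sketch (commented, not executed; paths hypothetical) combining the two helpers above
# to filter a parallel complex/simple corpus while keeping the files line-aligned:
#   with write_lines_in_parallel(['/tmp/out.complex', '/tmp/out.simple']) as output_files:
#       for complex_line, simple_line in yield_lines_in_parallel(['/tmp/in.complex', '/tmp/in.simple']):
#           if complex_line != simple_line:  # drop pairs where no simplification happened
#               output_files.write([complex_line, simple_line])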
def write_lines(lines, filepath):
filepath = Path(filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
with filepath.open('w') as f:
for line in lines:
f.write(line + '\n')
def yield_lines(filepath, n_lines=float('inf'), prop=1):
if prop < 1:
assert n_lines == float('inf')
n_lines = int(prop * count_lines(filepath))
with open(filepath, 'r') as f:
for i, l in enumerate(f):
if i >= n_lines:
break
yield l.rstrip('\n')
def read_lines(filepath, n_lines=float('inf'), prop=1):
return list(yield_lines(filepath, n_lines, prop))
def count_lines(filepath):
n_lines = 0
with Path(filepath).open() as f:
for l in f:
n_lines += 1
return n_lines
@contextmanager
def open_with_lock(filepath, mode):
with open(filepath, mode) as f:
flock(f, LOCK_EX)
yield f
flock(f, LOCK_UN)
def get_lockfile_path(path):
path = Path(path)
if path.is_dir():
return path / '.lockfile'
if path.is_file():
return path.parent / f'.{path.name}.lockfile'
@contextmanager
def lock_directory(dir_path):
# TODO: Locking a directory should lock all files in that directory
# Right now if we lock foo/, someone else can lock foo/bar.txt
# TODO: Nested with lock_directory() should not be blocking
    assert Path(dir_path).exists(), f'Directory does not exist: {dir_path}'
lockfile_path = get_lockfile_path(dir_path)
with open_with_lock(lockfile_path, 'w'):
yield
def safe_division(a, b):
if b == 0:
return 0
return a / b
def harmonic_mean(values, coefs=None):
if 0 in values:
return 0
    values = np.array(values)
    if coefs is None:
        coefs = np.ones(values.shape)
    coefs = np.array(coefs)
return np.sum(coefs) / np.dot(coefs, 1 / values)
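# Worked example for harmonic_mean (values chosen for illustration):
#   harmonic_mean([0.5, 1.0]) = 2 / (1/0.5 + 1/1.0) = 2 / 3 ≈ 0.67
# With coefs=[2, 1] the first value counts twice as much:
#   harmonic_mean([0.5, 1.0], coefs=[2, 1]) = 3 / (2/0.5 + 1/1.0) = 3 / 5 = 0.6
# Any zero value short-circuits the whole mean to 0.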
@contextmanager
def mute(mute_stdout=True, mute_stderr=True):
save_stdout = sys.stdout
save_stderr = sys.stderr
if mute_stdout:
sys.stdout = io.StringIO()
if mute_stderr:
sys.stderr = io.StringIO()
try:
yield
finally:
sys.stdout = save_stdout
sys.stderr = save_stderr
@contextmanager
def log_stdout(filepath, mute_stdout=False):
'''Context manager to write both to stdout and to a file'''
class MultipleStreamsWriter:
def __init__(self, streams):
self.streams = streams
def write(self, message):
for stream in self.streams:
stream.write(message)
def flush(self):
for stream in self.streams:
stream.flush()
save_stdout = sys.stdout
log_file = open(filepath, 'w')
if mute_stdout:
sys.stdout = MultipleStreamsWriter([log_file]) # Write to file only
else:
sys.stdout = MultipleStreamsWriter([save_stdout, log_file]) # Write to both stdout and file
try:
yield
finally:
sys.stdout = save_stdout
log_file.close()
def add_dicts(*dicts):
return {k: v for dic in dicts for k, v in dic.items()}
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_allowed_kwargs(func, *args, **kwargs):
    expected_args = inspect.getfullargspec(func).args
allowed_kwargs = expected_args[len(args):]
return {k: v for k, v in kwargs.items() if k in allowed_kwargs}
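# Worked example for get_allowed_kwargs (the function f and the values are hypothetical): given
#   def f(a, b, c=1, d=2): ...
# get_allowed_kwargs(f, 10, c=3, x=4) skips the first positional slot ('a'), keeps only keyword
# names among ('b', 'c', 'd'), and returns {'c': 3}; the unknown 'x' is silently dropped.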
class SkipWithBlock(Exception):
pass
class create_directory_or_skip(AbstractContextManager):
    '''Context manager for creating a new directory (with rollback, and skipping the with block if it already exists)
    In order to skip the execution of the with block if the directory already exists, this context manager uses deep
    magic from https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block
    '''
def __init__(self, dir_path, overwrite=False):
self.dir_path = Path(dir_path)
self.overwrite = overwrite
def __enter__(self):
if self.dir_path.exists():
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
files_in_directory = list(self.dir_path.iterdir())
if set(files_in_directory) in [set([]), set([self.dir_path / '.lockfile'])]:
# TODO: Quick hack to remove empty directories
self.directory_lock.__exit__(None, None, None)
print(f'Removing empty directory {self.dir_path}')
shutil.rmtree(self.dir_path)
else:
# Deep magic hack to skip the execution of the code inside the with block
# We set the trace to a dummy function
sys.settrace(lambda *args, **keys: None)
# Get the calling frame (sys._getframe(0) is the current frame)
frame = sys._getframe(1)
# Set the calling frame's trace to the one that raises the special exception
frame.f_trace = self.trace
return
print(f'Creating {self.dir_path}...')
self.dir_path.mkdir(parents=True, exist_ok=True)
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
def trace(self, frame, event, arg):
# This method is called when a new local scope is entered, i.e. right when the code in the with block begins
# The exception will therefore be caught by the __exit__()
raise SkipWithBlock()
def __exit__(self, type, value, traceback):
self.directory_lock.__exit__(type, value, traceback)
if type is not None:
if issubclass(type, SkipWithBlock):
return True # Suppress special SkipWithBlock exception
if issubclass(type, BaseException):
# Rollback
print(f'Error: Rolling back creation of directory {self.dir_path}')
shutil.rmtree(self.dir_path)
return False # Reraise the exception
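# Illustrative usage sketch (commented, not executed; the path and body are hypothetical):
#   with create_directory_or_skip('/tmp/my_dataset'):
#       prepare_dataset_files()  # runs only when /tmp/my_dataset does not exist yet
# If the directory already exists and is non-empty, the with block is skipped entirely;
# if the body raises, the partially created directory is removed and the exception re-raised.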
def get_temp_filepath(create=False):
temp_filepath = Path(tempfile.mkstemp()[1])
if not create:
temp_filepath.unlink()
return temp_filepath
def get_temp_filepaths(n_filepaths, create=False):
return [get_temp_filepath(create=create) for _ in range(n_filepaths)]
def delete_files(filepaths):
for filepath in filepaths:
filepath = Path(filepath)
assert filepath.is_file()
filepath.unlink()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# TODO: Move to utils/training.py
from functools import wraps
import time
def print_method_name(func):
'''Decorator to print method name for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f"method_name='{func.__name__}'")
return func(*args, **kwargs)
return wrapped_func
def print_args(func):
'''Decorator to print arguments of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f'args={args}')
print(f'kwargs={kwargs}')
return func(*args, **kwargs)
return wrapped_func
def print_result(func):
'''Decorator to print result of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
result = func(*args, **kwargs)
print(f'result={result}')
return result
return wrapped_func
def print_running_time(func):
'''Decorator to print running time of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
print(f'running_time={time.time() - start_time}')
return result
return wrapped_func
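# Illustrative sketch of stacking the decorators above (function hypothetical, not executed):
#   @print_method_name
#   @print_args
#   @print_result
#   @print_running_time
#   def add(a, b):
#       return a + b
#   add(1, 2)
# prints, in order: method_name='add', args=(1, 2), kwargs={}, running_time=..., result=3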
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.utils.helpers import harmonic_mean
# Transforms take a value and cast it to a score between 0 and 1, the higher the better
def bleu_transform(bleu):
min_bleu = 0
max_bleu = 100
bleu = max(bleu, min_bleu)
bleu = min(bleu, max_bleu)
return (bleu - min_bleu) / (max_bleu - min_bleu)
def sari_transform(sari):
min_sari = 0
max_sari = 60
sari = max(sari, min_sari)
sari = min(sari, max_sari)
return (sari - min_sari) / (max_sari - min_sari)
def fkgl_transform(fkgl):
min_fkgl = 0
max_fkgl = 20
fkgl = max(fkgl, min_fkgl)
fkgl = min(fkgl, max_fkgl)
return 1 - (fkgl - min_fkgl) / (max_fkgl - min_fkgl)
def combine_metrics(bleu, sari, fkgl, coefs):
    # Combine into a score between 0 and 1; the lower the better
assert len(coefs) == 3
return 1 - harmonic_mean([bleu_transform(bleu), sari_transform(sari), fkgl_transform(fkgl)], coefs)
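# Worked example (metric values chosen for illustration): with bleu=80, sari=30, fkgl=10 and
# coefs=[1, 1, 1], the transforms give 0.8, 0.5 and 0.5, their weighted harmonic mean is
# 3 / (1/0.8 + 1/0.5 + 1/0.5) = 3 / 5.25 ≈ 0.571, and combine_metrics returns 1 - 0.571 ≈ 0.43.
# A better system therefore gets a lower combined score, which is what the parametrization
# search in access.fairseq.main minimizes.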
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from easse.cli import evaluate_system_output
from access.preprocess import lowercase_file, to_lrb_rrb_file
from access.resources.paths import get_data_filepath
from access.utils.helpers import mute, get_temp_filepath
'''A simplifier is a method with signature: simplifier(complex_filepath, output_pred_filepath)'''
def get_prediction_on_turkcorpus(simplifier, phase):
source_filepath = get_data_filepath('turkcorpus', phase, 'complex')
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
return pred_filepath
def evaluate_simplifier_on_turkcorpus(simplifier, phase):
pred_filepath = get_prediction_on_turkcorpus(simplifier, phase)
pred_filepath = lowercase_file(pred_filepath)
pred_filepath = to_lrb_rrb_file(pred_filepath)
return evaluate_system_output(f'turkcorpus_{phase}_legacy',
sys_sents_path=pred_filepath,
metrics=['bleu', 'sari_legacy', 'fkgl'],
quality_estimation=True)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import fileinput
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.text import word_tokenize
from access.utils.helpers import yield_lines, write_lines, get_temp_filepath, mute
if __name__ == '__main__':
# Usage: python generate.py < my_file.complex
# Read from stdin
source_filepath = get_temp_filepath()
write_lines([word_tokenize(line) for line in fileinput.input()], source_filepath)
# Load best model
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
# Simplify
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
for line in yield_lines(pred_filepath):
print(line)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.fairseq.main import fairseq_train_and_evaluate
from access.resources.prepare import prepare_wikilarge, prepare_turkcorpus
if __name__ == '__main__':
print('Training a model from scratch')
prepare_wikilarge()
prepare_turkcorpus()
kwargs = {
'arch': 'transformer',
'warmup_updates': 4000,
'parametrization_budget': 256,
'beam': 8,
'dataset': 'wikilarge',
'dropout': 0.2,
'fp16': False,
'label_smoothing': 0.54,
'lr': 0.00011,
'lr_scheduler': 'fixed',
'max_epoch': 100,
'max_tokens': 5000,
'metrics_coefs': [0, 1, 0],
'optimizer': 'adam',
'preprocessors_kwargs': {
'LengthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'LevenshteinPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'WordRankRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'DependencyTreeDepthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'SentencePiecePreprocessor': {
'vocab_size': 10000
}
}
}
fairseq_train_and_evaluate(**kwargs)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_turkcorpus, prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
if __name__ == '__main__':
print('Evaluating pretrained model')
prepare_turkcorpus()
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
print(evaluate_simplifier_on_turkcorpus(simplifier, phase='test'))
|