# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from setuptools import find_packages
from arxiv_latex_cleaner._version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = []
with open("requirements.txt") as f:
for l in f.readlines():
l_c = l.strip()
if l_c and not l_c.startswith('#'):
install_requires.append(l_c)
setup(
name="arxiv_latex_cleaner",
version=__version__,
packages=find_packages(exclude=["*.tests"]),
python_requires='>=3',
url="https://github.com/google-research/arxiv-latex-cleaner",
license="Apache License, Version 2.0",
author="Google Research Authors",
author_email="[email protected]",
description="Cleans the LaTeX code of your paper to submit to arXiv.",
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={
"console_scripts": ["arxiv_latex_cleaner=arxiv_latex_cleaner.__main__:__main__"]
},
install_requires=install_requires,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Science/Research",
],
)
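
# Illustrative install-and-run flow for this package (a sketch, assuming a
# local checkout of the repository; the console script comes from the
# entry_points declared above):
#   pip install .
#   arxiv_latex_cleaner --help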
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "v1.0.1"
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleans the LaTeX code of your paper to submit to arXiv."""
import collections
import contextlib
import copy
import os
import tempfile
import regex
import shutil
import subprocess
import logging
from PIL import Image
PDF_RESIZE_COMMAND = (
'gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dNOPAUSE -dQUIET -dBATCH '
'-dDownsampleColorImages=true -dColorImageResolution={resolution} '
'-dColorImageDownsampleThreshold=1.0 -dAutoRotatePages=/None '
'-sOutputFile={output} {input}')
MAX_FILENAME_LENGTH = 120
# Fix for Windows: even though '\' (os.sep) is the standard path separator on
# Windows, it interferes with regular expressions. We change os.sep to '/' and
# patch os.path.join to a version using '/', since Windows handles '/' in
# paths correctly as well.
if os.name == 'nt':
global old_os_path_join
def new_os_join(path, *args):
res = old_os_path_join(path, *args)
res = res.replace('\\', '/')
return res
old_os_path_join = os.path.join
os.sep = '/'
os.path.join = new_os_join
def _create_dir_erase_if_exists(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def _create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def _keep_pattern(haystack, patterns_to_keep):
"""Keeps the strings that match 'patterns_to_keep'."""
out = []
for item in haystack:
if any((regex.findall(rem, item) for rem in patterns_to_keep)):
out.append(item)
return out
def _remove_pattern(haystack, patterns_to_remove):
"""Removes the strings that match 'patterns_to_remove'."""
return [
item for item in haystack
if item not in _keep_pattern([item], patterns_to_remove)
]
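
# Illustrative behavior of the two pattern helpers above (consistent with the
# unit tests later in this dump); patterns are regular expressions matched
# anywhere within each item:
#   _keep_pattern(['abc', 'bca'], ['a$'])    -> ['bca']
#   _remove_pattern(['abc', 'bca'], ['a$'])  -> ['abc']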
def _list_all_files(in_folder, ignore_dirs=None):
if ignore_dirs is None:
ignore_dirs = []
to_consider = [
os.path.join(os.path.relpath(path, in_folder), name)
if path != in_folder else name
for path, _, files in os.walk(in_folder)
for name in files
]
return _remove_pattern(to_consider, ignore_dirs)
def _copy_file(filename, params):
_create_dir_if_not_exists(
os.path.join(params['output_folder'], os.path.dirname(filename)))
shutil.copy(
os.path.join(params['input_folder'], filename),
os.path.join(params['output_folder'], filename))
def _remove_command(text, command, keep_text=False):
"""Removes '\\command{*}' from the string 'text'.
Regex `base_pattern` used to match balanced parentheses taken from:
https://stackoverflow.com/questions/546433/regular-expression-to-match-balanced-parentheses/35271017#35271017
"""
base_pattern = r'\\' + command + r'\{((?:[^{}]+|\{(?1)\})*)\}'
# Loops in case of nested commands that need to retain text, e.g.,
# \red{hello \red{world}}.
while True:
all_substitutions = []
has_match = False
for match in regex.finditer(base_pattern, text):
      # If only whitespace (or nothing) remains up to the following newline,
      # substitutes a percent sign so the newline structure is preserved.
has_match = True
new_substring = '' if not keep_text else text[match.span()[0] +
len(command) +
2:match.span()[1] - 1]
if match.span()[1] < len(text):
next_newline = text[match.span()[1]:].find('\n')
if next_newline != -1:
text_until_newline = text[match.span()[1]:match.span()[1] +
next_newline]
if (not text_until_newline or
text_until_newline.isspace()) and not keep_text:
new_substring = '%'
all_substitutions.append(
(match.span()[0], match.span()[1], new_substring))
for (start, end, new_substring) in reversed(all_substitutions):
text = text[:start] + new_substring + text[end:]
if not keep_text or not has_match:
break
return text
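
# Illustrative behavior (consistent with the unit tests later in this dump),
# for command='todo':
#   _remove_command('A\\todo{B}C', 'todo', keep_text=False) -> 'AC'
#   _remove_command('A\\todo{B}C', 'todo', keep_text=True)  -> 'ABC'
# When the removed command is followed only by whitespace up to the next
# newline, a '%' is substituted to preserve the line structure.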
def _remove_environment(text, environment):
"""Removes '\\begin{environment}*\\end{environment}' from 'text'."""
# Need to escape '{', to not trigger fuzzy matching if `environment` starts
# with one of 'i', 'd', 's', or 'e'
return regex.sub(
r'\\begin\{' + environment + r'}[\s\S]*?\\end\{' + environment + r'}', '',
text)
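
# Illustrative behavior (consistent with the unit tests later in this dump):
#   _remove_environment('Foo\\begin{comment}\n3x+2\n\\end{comment}\nFoo',
#                       'comment') -> 'Foo\nFoo'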
def _remove_iffalse_block(text):
"""Removes possibly nested r'\iffalse*\fi' blocks from 'text'."""
p = regex.compile(r'\\if\s*(\w+)|\\fi(?!\w)')
level = -1
positions_to_delete = []
start, end = 0, 0
for m in p.finditer(text):
if (m.group().replace(' ', '') == r'\iffalse' or
m.group().replace(' ', '') == r'\if0') and level == -1:
level += 1
start = m.start()
elif m.group().startswith(r'\if') and level >= 0:
level += 1
elif m.group() == r'\fi' and level >= 0:
if level == 0:
end = m.end()
positions_to_delete.append((start, end))
level -= 1
else:
pass
for (start, end) in reversed(positions_to_delete):
if end < len(text) and text[end].isspace():
end_to_del = end + 1
else:
end_to_del = end
text = text[:start] + text[end_to_del:]
return text
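
# Illustrative behavior (consistent with the unit tests later in this dump);
# \if blocks nested inside a \iffalse are removed along with it:
#   _remove_iffalse_block('\\iffalse\nFoo\n\\fi\\end{document}')
#     -> '\\end{document}'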
def _remove_comments_inline(text):
"""Removes the comments from the string 'text'."""
if 'auto-ignore' in text:
return text
if text.lstrip(' ').lstrip('\t').startswith('%'):
return ''
match = regex.search(r'(?<!\\)%', text)
if match:
return text[:match.end()] + '\n'
else:
return text
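
# Illustrative behavior (consistent with the unit tests later in this dump):
#   _remove_comments_inline(' % Comment\n')   -> ''
#   _remove_comments_inline('Foo %Comment\n') -> 'Foo %\n'
# Lines containing 'auto-ignore' are returned unchanged.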
def _strip_tex_contents(lines, end_str):
"""Removes everything after end_str."""
for i in range(len(lines)):
if end_str in lines[i]:
if '%' not in lines[i]:
return lines[:i + 1]
elif lines[i].index('%') > lines[i].index(end_str):
return lines[:i + 1]
return lines
def _read_file_content(filename):
with open(filename, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
lines = _strip_tex_contents(lines, '\\end{document}')
return lines
def _read_all_tex_contents(tex_files, parameters):
contents = {}
for fn in tex_files:
contents[fn] = _read_file_content(
os.path.join(parameters['input_folder'], fn))
return contents
def _write_file_content(content, filename):
_create_dir_if_not_exists(os.path.dirname(filename))
with open(filename, 'w', encoding='utf-8') as fp:
return fp.write(content)
def _remove_comments_and_commands_to_delete(content, parameters):
"""Erases all LaTeX comments in the content, and writes it."""
content = [_remove_comments_inline(line) for line in content]
content = _remove_environment(''.join(content), 'comment')
content = _remove_iffalse_block(content)
for environment in parameters.get('environments_to_delete', []):
content = _remove_environment(content, environment)
for command in parameters.get('commands_only_to_delete', []):
content = _remove_command(content, command, True)
for command in parameters['commands_to_delete']:
content = _remove_command(content, command, False)
return content
def _replace_tikzpictures(content, figures):
"""
Replaces all tikzpicture environments (with includegraphic commands of
external PDF figures) in the content, and writes it.
"""
def get_figure(matchobj):
found_tikz_filename = regex.search(r'\\tikzsetnextfilename{(.*?)}',
matchobj.group(0)).group(1)
    # Searches the figure split for the externalized PDF figure.
matching_tikz_filenames = _keep_pattern(
figures, ['/' + found_tikz_filename + '.pdf'])
if len(matching_tikz_filenames) == 1:
return '\\includegraphics{' + matching_tikz_filenames[0] + '}'
else:
return matchobj.group(0)
content = regex.sub(r'\\tikzsetnextfilename{[\s\S]*?\\end{tikzpicture}',
get_figure, content)
return content
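
# Illustrative behavior (consistent with the unit tests later in this dump),
# with figures=['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf']: a tikzpicture
# preceded by \tikzsetnextfilename{test2} is replaced by
# \includegraphics{ext_tikz/test2.pdf}; pictures without a matching external
# PDF are left untouched.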
def _replace_includesvg(content, svg_inkscape_files):
def repl_svg(matchobj):
svg_path = matchobj.group(2)
svg_filename = os.path.basename(svg_path)
    # Searches the svg_inkscape split for the generated pdf_tex file.
matching_pdf_tex_files = _keep_pattern(
svg_inkscape_files, ['/' + svg_filename + '-tex.pdf_tex'])
if len(matching_pdf_tex_files) == 1:
options = '' if matchobj.group(1) is None else matchobj.group(1)
return f'\\includeinkscape{options}{{{matching_pdf_tex_files[0]}}}'
else:
return matchobj.group(0)
content = regex.sub(r'\\includesvg(\[.*?\])?{(.*?)}', repl_svg, content)
return content
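
# Illustrative behavior (consistent with the unit tests later in this dump),
# with svg_inkscape_files=['ext_svg/test2-tex.pdf_tex']:
#   'Foo\\includesvg[width=\\linewidth]{test2}\nFoo'
#     -> 'Foo\\includeinkscape[width=\\linewidth]{ext_svg/test2-tex.pdf_tex}\nFoo'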
def _resize_and_copy_figure(filename, origin_folder, destination_folder,
resize_image, image_size, compress_pdf,
pdf_resolution):
"""Resizes and copies the input figure (either JPG, PNG, or PDF)."""
_create_dir_if_not_exists(
os.path.join(destination_folder, os.path.dirname(filename)))
if resize_image and os.path.splitext(filename)[1].lower() in [
'.jpg', '.jpeg', '.png'
]:
im = Image.open(os.path.join(origin_folder, filename))
if max(im.size) > image_size:
im = im.resize(
tuple([int(x * float(image_size) / max(im.size)) for x in im.size]),
Image.Resampling.LANCZOS)
if os.path.splitext(filename)[1].lower() in ['.jpg', '.jpeg']:
im.save(os.path.join(destination_folder, filename), 'JPEG', quality=90)
elif os.path.splitext(filename)[1].lower() in ['.png']:
im.save(os.path.join(destination_folder, filename), 'PNG')
elif compress_pdf and os.path.splitext(filename)[1].lower() == '.pdf':
_resize_pdf_figure(filename, origin_folder, destination_folder,
pdf_resolution)
else:
shutil.copy(
os.path.join(origin_folder, filename),
os.path.join(destination_folder, filename))
def _resize_pdf_figure(filename,
origin_folder,
destination_folder,
resolution,
timeout=10):
input_file = os.path.join(origin_folder, filename)
output_file = os.path.join(destination_folder, filename)
bash_command = PDF_RESIZE_COMMAND.format(
input=input_file, output=output_file, resolution=resolution)
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
outs, errs = process.communicate()
print('Output: ', outs)
print('Errors: ', errs)
def _copy_only_referenced_non_tex_not_in_root(parameters, contents, splits):
for fn in _keep_only_referenced(
splits['non_tex_not_in_root'], contents, strict=True):
_copy_file(fn, parameters)
def _resize_and_copy_figures_if_referenced(parameters, contents, splits):
image_size = collections.defaultdict(lambda: parameters['im_size'])
image_size.update(parameters['images_allowlist'])
pdf_resolution = collections.defaultdict(
lambda: parameters['pdf_im_resolution'])
pdf_resolution.update(parameters['images_allowlist'])
for image_file in _keep_only_referenced(
splits['figures'], contents, strict=False):
_resize_and_copy_figure(
filename=image_file,
origin_folder=parameters['input_folder'],
destination_folder=parameters['output_folder'],
resize_image=parameters['resize_images'],
image_size=image_size[image_file],
compress_pdf=parameters['compress_pdf'],
pdf_resolution=pdf_resolution[image_file])
def _search_reference(filename, contents, strict=False):
"""Returns a match object if filename is referenced in contents, and None otherwise.
  In non-strict mode, the path prefix and the extension are optional.
"""
if strict:
# regex pattern for strict=True for path/to/img.ext:
# \{[\s%]*path/to/img\.ext[\s%]*\}
filename_regex = filename.replace('.', r'\.')
else:
basename = os.path.basename(filename)
# make extension optional
root, extension = os.path.splitext(basename)
unescaped_basename_regex = '{}({})?'.format(root, extension)
basename_regex = unescaped_basename_regex.replace('.', r'\.')
# since os.path.split only splits into two parts
# need to iterate and collect all the fragments
fragments = []
cur_head = os.path.dirname(filename)
while cur_head:
cur_head, tail = os.path.split(cur_head)
fragments.insert(0, tail) # insert at the beginning
path_prefix_regex = ''
for fragment in fragments:
path_prefix_regex = '({}{}{})?'.format(path_prefix_regex, fragment,
os.sep)
    # Regex pattern for strict=False for path/to/img.ext:
    # \{[\s%]*(<path_prefix>)?<basename>(<ext>)?[\s%]*\}
filename_regex = path_prefix_regex + basename_regex
# Some files 'path/to/file' are referenced in tex as './path/to/file' thus
# adds prefix for relative paths starting with './' or '.\' to regex search.
filename_regex = r'(.' + os.sep + r')?' + filename_regex
# Pads with braces and optional whitespace/comment characters.
patn = r'\{{[\s%]*{}[\s%]*\}}'.format(filename_regex)
# Picture references in LaTeX are allowed to be in different cases.
return regex.search(patn, contents, regex.IGNORECASE)
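
# Illustrative behavior (consistent with the unit tests later in this dump):
# in non-strict mode, for filename 'path/to/img.ext', the contents '{img}',
# '{to/img}', and '{path/to/img.ext}' all match, while '{from/img.ext}' does
# not; in strict mode only '{path/to/img.ext}' (up to surrounding whitespace
# and '%' characters) matches.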
def _keep_only_referenced(filenames, contents, strict=False):
"""Returns the filenames referenced from contents.
  In non-strict mode, the path prefix and the extension are optional.
"""
return [
fn for fn in filenames
if _search_reference(fn, contents, strict) is not None
]
def _keep_only_referenced_tex(contents, splits):
"""Returns the filenames referenced from the tex files themselves.
  Iterates until a fixed point is reached, since a file may be referenced
  only from a file that is itself not yet known to be referenced.
"""
old_referenced = set(splits['tex_in_root'] + splits['tex_not_in_root'])
while True:
referenced = set(splits['tex_in_root'])
for fn in old_referenced:
for fn2 in old_referenced:
if regex.search(r'(' + os.path.splitext(fn)[0] + r'[.}])',
'\n'.join(contents[fn2])):
referenced.add(fn)
if referenced == old_referenced:
splits['tex_to_copy'] = list(referenced)
return
old_referenced = referenced.copy()
def _add_root_tex_files(splits):
# TODO: Check auto-ignore marker in root to detect the main file. Then check
# there is only one non-referenced TeX in root.
# Forces the TeX in root to be copied, even if they are not referenced.
for fn in splits['tex_in_root']:
if fn not in splits['tex_to_copy']:
splits['tex_to_copy'].append(fn)
def _split_all_files(parameters):
"""Splits the files into types or location to know what to do with them."""
file_splits = {
'all':
_list_all_files(
parameters['input_folder'], ignore_dirs=['.git' + os.sep]),
'in_root': [
f for f in os.listdir(parameters['input_folder'])
if os.path.isfile(os.path.join(parameters['input_folder'], f))
]
}
file_splits['not_in_root'] = [
f for f in file_splits['all'] if f not in file_splits['in_root']
]
file_splits['to_copy_in_root'] = _remove_pattern(
file_splits['in_root'],
parameters['to_delete'] + parameters['figures_to_copy_if_referenced'])
file_splits['to_copy_not_in_root'] = _remove_pattern(
file_splits['not_in_root'],
parameters['to_delete'] + parameters['figures_to_copy_if_referenced'])
file_splits['figures'] = _keep_pattern(
file_splits['all'], parameters['figures_to_copy_if_referenced'])
file_splits['tex_in_root'] = _keep_pattern(file_splits['to_copy_in_root'],
['.tex$', '.tikz$'])
file_splits['tex_not_in_root'] = _keep_pattern(
file_splits['to_copy_not_in_root'], ['.tex$', '.tikz$'])
file_splits['non_tex_in_root'] = _remove_pattern(
file_splits['to_copy_in_root'], ['.tex$', '.tikz$'])
file_splits['non_tex_not_in_root'] = _remove_pattern(
file_splits['to_copy_not_in_root'], ['.tex$', '.tikz$'])
if parameters.get('use_external_tikz', None) is not None:
file_splits['external_tikz_figures'] = _keep_pattern(
file_splits['all'], [parameters['use_external_tikz']])
else:
file_splits['external_tikz_figures'] = []
if parameters.get('svg_inkscape', None) is not None:
file_splits['svg_inkscape'] = _keep_pattern(
file_splits['all'], [parameters['svg_inkscape']])
else:
file_splits['svg_inkscape'] = []
return file_splits
def _create_out_folder(input_folder):
"""Creates the output folder, erasing it if existed."""
out_folder = os.path.abspath(input_folder).removesuffix(".zip") + '_arXiv'
_create_dir_erase_if_exists(out_folder)
return out_folder
def run_arxiv_cleaner(parameters):
"""Core of the code, runs the actual arXiv cleaner."""
files_to_delete = [
r'\.aux$', r'\.sh$', r'\.blg$', r'\.brf$', r'\.log$', r'\.out$', r'\.ps$',
r'\.dvi$', r'\.synctex.gz$', '~$', r'\.backup$', r'\.gitignore$',
r'\.DS_Store$', r'\.svg$', r'^\.idea', r'\.dpth$', r'\.md5$', r'\.dep$',
r'\.auxlock$', r'\.fls$', r'\.fdb_latexmk$'
]
if not parameters['keep_bib']:
files_to_delete.append(r'\.bib$')
parameters.update({
'to_delete':
files_to_delete,
'figures_to_copy_if_referenced': [
r'\.png$', r'\.jpg$', r'\.jpeg$', r'\.pdf$'
]
})
logging.info('Collecting file structure.')
parameters['output_folder'] = _create_out_folder(parameters['input_folder'])
from_zip = parameters['input_folder'].endswith('.zip')
  tempdir_context = (
      tempfile.TemporaryDirectory() if from_zip else contextlib.nullcontext())
with tempdir_context as tempdir:
if from_zip:
logging.info('Unzipping input folder.')
shutil.unpack_archive(parameters['input_folder'], tempdir)
parameters['input_folder'] = tempdir
splits = _split_all_files(parameters)
logging.info('Reading all tex files')
tex_contents = _read_all_tex_contents(
splits['tex_in_root'] + splits['tex_not_in_root'], parameters)
for tex_file in tex_contents:
logging.info('Removing comments in file %s.', tex_file)
tex_contents[tex_file] = _remove_comments_and_commands_to_delete(
tex_contents[tex_file], parameters)
for tex_file in tex_contents:
logging.info('Replacing \\includesvg calls in file %s.', tex_file)
tex_contents[tex_file] = _replace_includesvg(tex_contents[tex_file],
splits['svg_inkscape'])
for tex_file in tex_contents:
logging.info('Replacing Tikz Pictures in file %s.', tex_file)
content = _replace_tikzpictures(tex_contents[tex_file],
splits['external_tikz_figures'])
      # Note: if the file ends with '\n', splitting keeps a trailing empty
      # string, and the later '\n'.join restores the final newline unchanged.
tex_contents[tex_file] = content.split('\n')
_keep_only_referenced_tex(tex_contents, splits)
_add_root_tex_files(splits)
for tex_file in splits['tex_to_copy']:
logging.info('Replacing patterns in file %s.', tex_file)
content = '\n'.join(tex_contents[tex_file])
content = _find_and_replace_patterns(
content, parameters.get('patterns_and_insertions', list()))
tex_contents[tex_file] = content
new_path = os.path.join(parameters['output_folder'], tex_file)
logging.info('Writing modified contents to %s.', new_path)
_write_file_content(
content,
new_path,
)
full_content = '\n'.join(
''.join(tex_contents[fn]) for fn in splits['tex_to_copy'])
_copy_only_referenced_non_tex_not_in_root(parameters, full_content, splits)
for non_tex_file in splits['non_tex_in_root']:
logging.info('Copying non-tex file %s.', non_tex_file)
_copy_file(non_tex_file, parameters)
_resize_and_copy_figures_if_referenced(parameters, full_content, splits)
logging.info('Outputs written to %s', parameters['output_folder'])
def strip_whitespace(text):
"""Strips all whitespace characters.
https://stackoverflow.com/questions/8270092/remove-all-whitespace-in-a-string
"""
pattern = regex.compile(r'\s+')
text = regex.sub(pattern, '', text)
return text
def merge_args_into_config(args, config_params):
final_args = copy.deepcopy(config_params)
config_keys = config_params.keys()
for key, value in args.items():
if key in config_keys:
if any([isinstance(value, t) for t in [str, bool, float, int]]):
# Overwrites config value with args value.
final_args[key] = value
elif isinstance(value, list):
# Appends args values to config values.
final_args[key] = value + config_params[key]
elif isinstance(value, dict):
# Updates config params with args params.
final_args[key].update(**value)
else:
final_args[key] = value
return final_args
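
# Illustrative behavior (consistent with the unit tests later in this dump):
# scalar args overwrite config values, list args are prepended to config
# lists, and dict args update config dicts:
#   merge_args_into_config({'im_size': 1000}, {'im_size': 500})
#     -> {'im_size': 1000}
#   merge_args_into_config({'commands_to_delete': ['a']},
#                          {'commands_to_delete': ['b']})
#     -> {'commands_to_delete': ['a', 'b']}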
def _find_and_replace_patterns(content, patterns_and_insertions):
r"""
content: str
patterns_and_insertions: List[Dict]
Example for patterns_and_insertions:
[
{
"pattern" :
r"(?:\\figcompfigures{\s*)(?P<first>.*?)\s*}\s*{\s*(?P<second>.*?)\s*}\s*{\s*(?P<third>.*?)\s*}",
"insertion" :
r"\parbox[c]{{{second}\linewidth}}{{\includegraphics[width={third}\linewidth]{{figures/{first}}}}}}",
"description": "Replace figcompfigures"
},
]
"""
for pattern_and_insertion in patterns_and_insertions:
pattern = pattern_and_insertion['pattern']
insertion = pattern_and_insertion['insertion']
description = pattern_and_insertion['description']
logging.info('Processing pattern: %s.', description)
p = regex.compile(pattern)
m = p.search(content)
while m is not None:
local_insertion = insertion.format(**m.groupdict())
if pattern_and_insertion.get('strip_whitespace', True):
local_insertion = strip_whitespace(local_insertion)
logging.info(f'Found {content[m.start():m.end()]:<70}')
logging.info(f'Replacing with {local_insertion:<30}')
content = content[:m.start()] + local_insertion + content[m.end():]
m = p.search(content)
logging.info('Finished pattern: %s.', description)
return content
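
# Illustrative programmatic use (a sketch following the integration test later
# in this dump; the input path is a placeholder):
#   run_arxiv_cleaner({
#       'input_folder': 'path/to/paper',
#       'resize_images': False, 'im_size': 500,
#       'compress_pdf': False, 'pdf_im_resolution': 500,
#       'images_allowlist': {}, 'commands_to_delete': [],
#       'commands_only_to_delete': [], 'environments_to_delete': [],
#       'use_external_tikz': None, 'keep_bib': False,
#   })
# The cleaned copy is written next to the input as 'path/to/paper_arXiv'.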
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for ``arxiv_latex_cleaner``.
.. code-block:: bash
$ python -m arxiv_latex_cleaner --help
"""
import argparse
import json
import logging
from ._version import __version__
from .arxiv_latex_cleaner import merge_args_into_config
from .arxiv_latex_cleaner import run_arxiv_cleaner
import yaml
PARSER = argparse.ArgumentParser(
prog="arxiv_latex_cleaner@{0}".format(__version__),
description=("Clean the LaTeX code of your paper to submit to arXiv. "
"Check the README for more information on the use."),
)
PARSER.add_argument(
"input_folder", type=str, help="Input folder or zip archive containing the LaTeX code.")
PARSER.add_argument(
"--resize_images",
action="store_true",
help="Resize images.",
)
PARSER.add_argument(
"--im_size",
default=500,
type=int,
help=("Size of the output images (in pixels, longest side). Fine tune this "
"to get as close to 10MB as possible."),
)
PARSER.add_argument(
"--compress_pdf",
action="store_true",
help="Compress PDF images using ghostscript (Linux and Mac only).",
)
PARSER.add_argument(
"--pdf_im_resolution",
default=500,
type=int,
help="Resolution (in dpi) to which the tool resamples the PDF images.",
)
PARSER.add_argument(
"--images_allowlist",
default={},
type=json.loads,
help=("Images (and PDFs) that won't be resized to the default resolution,"
"but the one provided here. Value is pixel for images, and dpi for"
"PDFs, as in --im_size and --pdf_im_resolution, respectively. Format "
"is a dictionary as: '{\"path/to/im.jpg\": 1000}'"),
)
PARSER.add_argument(
"--keep_bib",
action="store_true",
help="Avoid deleting the *.bib files.",
)
PARSER.add_argument(
"--commands_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX commands that will be deleted. Useful for e.g. user-defined "
"\\todo commands. For example, to delete all occurrences of \\todo1{} "
"and \\todo2{}, run the tool with `--commands_to_delete todo1 todo2`."
"Please note that the positional argument `input_folder` cannot come "
"immediately after `commands_to_delete`, as the parser does not have "
"any way to know if it's another command to delete."),
)
PARSER.add_argument(
"--commands_only_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX commands that will be deleted but the text wrapped in the "
"commands will be retained. Useful for commands that change text "
"formats and colors, which you may want to remove but keep the "
"text within. Usages are exactly the same as commands_to_delete. "
"Note that if the commands listed here duplicate that after "
"commands_to_delete, the default action will be retaining the wrapped text."),
)
PARSER.add_argument(
"--environments_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX environments that will be deleted. Useful for e.g. user-"
"defined comment environments. For example, to delete all occurrences "
"of \\begin{note} ... \\end{note}, run the tool with "
"`--environments_to_delete note`. Please note that the positional "
"argument `input_folder` cannot come immediately after "
"`environments_to_delete`, as the parser does not have any way to "
"know if it's another environment to delete."),
)
PARSER.add_argument(
"--use_external_tikz",
type=str,
help=("Folder (relative to input folder) containing externalized tikz "
"figures in PDF format."))
PARSER.add_argument(
"--svg_inkscape",
nargs="?",
type=str,
const="svg-inkscape",
help=(
"Include PDF files generated by Inkscape via the `\\includesvg` "
"command from the `svg` package. This is done by replacing the "
"`\\includesvg` calls with `\\includeinkscape` calls pointing to the "
"generated `.pdf_tex` files. By default, these files and the "
"generated PDFs are located under `./svg-inkscape` (relative to the "
"input folder), but a different path (relative to the input folder) "
"can be provided in case a different `inkscapepath` was set when "
"loading the `svg` package."
)
)
PARSER.add_argument(
"--config",
type=str,
help=("Read settings from `.yaml` config file. If command line arguments "
"are provided additionally, the config file parameters are updated "
"with the command line parameters."),
required=False,
)
PARSER.add_argument(
"--verbose",
action="store_true",
help="Enable detailed output.",
)
ARGS = vars(PARSER.parse_args())
if ARGS["config"] is not None:
try:
with open(ARGS["config"], "r") as config_file:
config_params = yaml.safe_load(config_file)
final_args = merge_args_into_config(ARGS, config_params)
except FileNotFoundError:
print(f"config file {ARGS.config} not found.")
final_args = ARGS
final_args.pop("config", None)
else:
final_args = ARGS
if final_args.get("verbose", False):
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
run_arxiv_cleaner(final_args)
exit(0)
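
# Example invocations (illustrative; paths are placeholders):
#   python -m arxiv_latex_cleaner path/to/paper
#   python -m arxiv_latex_cleaner path/to/paper.zip --resize_images --im_size 500
#   python -m arxiv_latex_cleaner path/to/paper --config cleaner_config.yaml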
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
import shutil
import unittest
from absl.testing import parameterized
from arxiv_latex_cleaner import arxiv_latex_cleaner
from PIL import Image
def make_args(
input_folder='foo/bar',
resize_images=False,
im_size=500,
compress_pdf=False,
pdf_im_resolution=500,
images_allowlist=None,
commands_to_delete=None,
use_external_tikz='foo/bar/tikz',
):
if images_allowlist is None:
images_allowlist = {}
if commands_to_delete is None:
commands_to_delete = []
args = {
'input_folder': input_folder,
'resize_images': resize_images,
'im_size': im_size,
'compress_pdf': compress_pdf,
'pdf_im_resolution': pdf_im_resolution,
'images_allowlist': images_allowlist,
'commands_to_delete': commands_to_delete,
'use_external_tikz': use_external_tikz,
}
return args
def make_contents():
return (r'& \figcompfigures{'
'\n\timage1.jpg'
'\n}{'
'\n\t'
r'\ww'
'\n}{'
'\n\t1.0'
'\n\t}'
'\n& '
r'\figcompfigures{image2.jpg}{\ww}{1.0}')
def make_patterns():
pattern = r'(?:\\figcompfigures{\s*)(?P<first>.*?)\s*}\s*{\s*(?P<second>.*?)\s*}\s*{\s*(?P<third>.*?)\s*}'
insertion = r"""\parbox[c]{{
{second}\linewidth
}}{{
\includegraphics[
width={third}\linewidth
]{{
figures/{first}
}}
}} """
description = 'Replace figcompfigures'
output = {
'pattern': pattern,
'insertion': insertion,
'description': description
}
return [output]
def make_search_reference_tests():
return ({
'testcase_name': 'prefix1',
'filenames': ['include_image_yes.png', 'include_image.png'],
'contents': '\\include{include_image_yes.png}',
'strict': False,
'true_outputs': ['include_image_yes.png']
}, {
'testcase_name': 'prefix2',
'filenames': ['include_image_yes.png', 'include_image.png'],
'contents': '\\include{include_image.png}',
'strict': False,
'true_outputs': ['include_image.png']
}, {
'testcase_name': 'nested_more_specific',
'filenames': [
'images/im_included.png', 'images/include/images/im_included.png'
],
'contents': '\\include{images/include/images/im_included.png}',
'strict': False,
'true_outputs': ['images/include/images/im_included.png']
}, {
'testcase_name':
'nested_less_specific',
'filenames': [
'images/im_included.png', 'images/include/images/im_included.png'
],
'contents':
'\\include{images/im_included.png}',
'strict':
False,
'true_outputs': [
'images/im_included.png', 'images/include/images/im_included.png'
]
}, {
'testcase_name': 'nested_substring',
'filenames': ['images/im_included.png', 'im_included.png'],
'contents': '\\include{images/im_included.png}',
'strict': False,
'true_outputs': ['images/im_included.png']
}, {
'testcase_name': 'nested_diffpath',
'filenames': ['images/im_included.png', 'figures/im_included.png'],
'contents': '\\include{images/im_included.png}',
'strict': False,
'true_outputs': ['images/im_included.png']
}, {
'testcase_name': 'diffext',
'filenames': ['tables/demo.tex', 'tables/demo.tikz', 'demo.tex'],
'contents': '\\include{tables/demo.tex}',
'strict': False,
'true_outputs': ['tables/demo.tex']
}, {
'testcase_name': 'diffext2',
'filenames': ['tables/demo.tex', 'tables/demo.tikz', 'demo.tex'],
'contents': '\\include{tables/demo}',
'strict': False,
'true_outputs': ['tables/demo.tex', 'tables/demo.tikz']
}, {
'testcase_name': 'strict_prefix1',
'filenames': ['demo_yes.tex', 'demo.tex'],
'contents': '\\include{demo_yes.tex}',
'strict': True,
'true_outputs': ['demo_yes.tex']
}, {
'testcase_name': 'strict_prefix2',
'filenames': ['demo_yes.tex', 'demo.tex'],
'contents': '\\include{demo.tex}',
'strict': True,
'true_outputs': ['demo.tex']
}, {
'testcase_name': 'strict_nested_more_specific',
'filenames': [
'tables/table_included.csv',
'tables/include/tables/table_included.csv'
],
'contents': '\\include{tables/include/tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/include/tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_less_specific',
'filenames': [
'tables/table_included.csv',
'tables/include/tables/table_included.csv'
],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_substring1',
'filenames': ['tables/table_included.csv', 'table_included.csv'],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_substring2',
'filenames': ['tables/table_included.csv', 'table_included.csv'],
'contents': '\\include{table_included.csv}',
'strict': True,
'true_outputs': ['table_included.csv']
}, {
'testcase_name': 'strict_nested_diffpath',
'filenames': ['tables/table_included.csv', 'data/table_included.csv'],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_diffext',
'filenames': ['tables/demo.csv', 'tables/demo.txt', 'demo.csv'],
'contents': '\\include{tables/demo.csv}',
'strict': True,
'true_outputs': ['tables/demo.csv']
}, {
'testcase_name': 'path_starting_with_dot',
'filenames': ['./images/im_included.png', './figures/im_included.png'],
'contents': '\\include{./images/im_included.png}',
'strict': False,
'true_outputs': ['./images/im_included.png']
})
class UnitTests(parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'empty config',
'args': make_args(),
'config_params': {},
'final_args': make_args(),
},
{
'testcase_name': 'empty args',
'args': {},
'config_params': make_args(),
'final_args': make_args(),
},
{
'testcase_name':
'args and config provided',
'args':
make_args(
images_allowlist={'path1/': 1000},
commands_to_delete=[r'\todo1']),
'config_params':
make_args(
'foo_/bar_',
True,
1000,
True,
1000,
images_allowlist={'path2/': 1000},
commands_to_delete=[r'\todo2'],
use_external_tikz='foo_/bar_/tikz_',
),
'final_args':
make_args(
images_allowlist={
'path1/': 1000,
'path2/': 1000
},
commands_to_delete=[r'\todo1', r'\todo2'],
),
},
)
def test_merge_args_into_config(self, args, config_params, final_args):
self.assertEqual(
arxiv_latex_cleaner.merge_args_into_config(args, config_params),
final_args)
@parameterized.named_parameters(
{
'testcase_name': 'no_comment',
'line_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'auto_ignore',
'line_in': '%auto-ignore\n',
'true_output': '%auto-ignore\n'
}, {
'testcase_name': 'percent',
'line_in': r'100\% accurate\n',
'true_output': r'100\% accurate\n'
}, {
'testcase_name': 'comment',
'line_in': ' % Comment\n',
'true_output': ''
}, {
'testcase_name': 'comment_inline',
'line_in': 'Foo %Comment\n',
'true_output': 'Foo %\n'
})
def test_remove_comments_inline(self, line_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_comments_inline(line_in), true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_command',
'text_in': 'Foo\nFoo2\n',
'keep_text': False,
'true_output': 'Foo\nFoo2\n'
}, {
'testcase_name': 'command_not_removed',
'text_in': '\\textit{Foo\nFoo2}\n',
'keep_text': False,
'true_output': '\\textit{Foo\nFoo2}\n'
}, {
'testcase_name': 'command_no_end_line_removed',
'text_in': 'A\\todo{B\nC}D\nE\n\\end{document}',
'keep_text': False,
'true_output': 'AD\nE\n\\end{document}'
}, {
'testcase_name': 'command_with_end_line_removed',
'text_in': 'A\n\\todo{B\nC}\nD\n\\end{document}',
'keep_text': False,
'true_output': 'A\n%\nD\n\\end{document}'
}, {
'testcase_name': 'no_command_keep_text',
'text_in': 'Foo\nFoo2\n',
'keep_text': True,
'true_output': 'Foo\nFoo2\n'
}, {
'testcase_name': 'command_not_removed_keep_text',
'text_in': '\\textit{Foo\nFoo2}\n',
'keep_text': True,
'true_output': '\\textit{Foo\nFoo2}\n'
}, {
'testcase_name': 'command_no_end_line_removed_keep_text',
'text_in': 'A\\todo{B\nC}D\nE\n\\end{document}',
'keep_text': True,
'true_output': 'AB\nCD\nE\n\\end{document}'
}, {
'testcase_name': 'command_with_end_line_removed_keep_text',
'text_in': 'A\n\\todo{B\nC}\nD\n\\end{document}',
'keep_text': True,
'true_output': 'A\nB\nC\nD\n\\end{document}'
}, {
'testcase_name': 'nested_command_keep_text',
'text_in': 'A\n\\todo{B\n\\todo{C}}\nD\n\\end{document}',
'keep_text': True,
'true_output': 'A\nB\nC\nD\n\\end{document}'
}, {
'testcase_name':
'deeply_nested_command_keep_text',
'text_in':
'A\n\\todo{B\n\\emph{C\\footnote{\\textbf{D}}}}\nE\n\\end{document}',
'keep_text':
True,
'true_output':
'A\nB\n\\emph{C\\footnote{\\textbf{D}}}\nE\n\\end{document}'
})
def test_remove_command(self, text_in, keep_text, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_command(text_in, 'todo', keep_text),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_environment',
'text_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'environment_not_removed',
'text_in': 'Foo\n\\begin{equation}\n3x+2\n\\end{equation}\nFoo',
'true_output': 'Foo\n\\begin{equation}\n3x+2\n\\end{equation}\nFoo'
}, {
'testcase_name': 'environment_removed',
'text_in': 'Foo\\begin{comment}\n3x+2\n\\end{comment}\nFoo',
'true_output': 'Foo\nFoo'
})
def test_remove_environment(self, text_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_environment(text_in, 'comment'),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_iffalse',
'text_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'if_not_removed',
'text_in': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n'
}, {
'testcase_name': 'if_removed_with_nested_ifvar',
'text_in': '\\ifvar\n\\iffalse\n\\ifvar\nFoo\n\\fi\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\fi\n'
}, {
'testcase_name': 'if_removed_with_nested_iffalse',
'text_in': '\\ifvar\n\\iffalse\n\\iffalse\nFoo\n\\fi\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\fi\n'
}, {
'testcase_name': 'if_removed_eof',
'text_in': '\\iffalse\nFoo\n\\fi',
'true_output': ''
}, {
'testcase_name': 'if_removed_space',
'text_in': '\\iffalse\nFoo\n\\fi ',
'true_output': ''
}, {
'testcase_name': 'if_removed_backslash',
'text_in': '\\iffalse\nFoo\n\\fi\\end{document}',
'true_output': '\\end{document}'
}, {
'testcase_name': 'commands_not_removed',
'text_in': '\\newcommand\\figref[1]{Figure~\\ref{fig:\#1}}',
'true_output': '\\newcommand\\figref[1]{Figure~\\ref{fig:\#1}}'
})
def test_remove_iffalse_block(self, text_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_iffalse_block(text_in), true_output)
@parameterized.named_parameters(
{
'testcase_name': 'all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a'],
'true_outputs': ['abc', 'bca'],
}, {
'testcase_name': 'not_all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a$'],
'true_outputs': ['bca'],
})
def test_keep_pattern(self, inputs, patterns, true_outputs):
self.assertEqual(
list(arxiv_latex_cleaner._keep_pattern(inputs, patterns)), true_outputs)
@parameterized.named_parameters(
{
'testcase_name': 'all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a'],
'true_outputs': [],
}, {
'testcase_name': 'not_all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a$'],
'true_outputs': ['abc'],
})
def test_remove_pattern(self, inputs, patterns, true_outputs):
self.assertEqual(
list(arxiv_latex_cleaner._remove_pattern(inputs, patterns)),
true_outputs)
@parameterized.named_parameters(
{
'testcase_name':
'replace_contents',
'content':
make_contents(),
'patterns_and_insertions':
make_patterns(),
'true_outputs': (
r'& \parbox[c]{\ww\linewidth}{\includegraphics[width=1.0\linewidth]{figures/image1.jpg}}'
'\n'
r'& \parbox[c]{\ww\linewidth}{\includegraphics[width=1.0\linewidth]{figures/image2.jpg}}'
),
},)
def test_find_and_replace_patterns(self, content, patterns_and_insertions,
true_outputs):
output = arxiv_latex_cleaner._find_and_replace_patterns(
content, patterns_and_insertions)
output = arxiv_latex_cleaner.strip_whitespace(output)
true_outputs = arxiv_latex_cleaner.strip_whitespace(true_outputs)
self.assertEqual(output, true_outputs)
@parameterized.named_parameters(
{
'testcase_name': 'no_tikz',
'text_in': 'Foo\n',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output': 'Foo\n'
}, {
'testcase_name':
'tikz_no_match',
'text_in':
'Foo\\tikzsetnextfilename{test_no_match}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output':
'Foo\\tikzsetnextfilename{test_no_match}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo'
}, {
'testcase_name':
'tikz_match',
'text_in':
'Foo\\tikzsetnextfilename{test2}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output':
'Foo\\includegraphics{ext_tikz/test2.pdf}\nFoo'
})
def test_replace_tikzpictures(self, text_in, figures_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._replace_tikzpictures(text_in, figures_in),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_includesvg',
'text_in': 'Foo\n',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output': 'Foo\n'
}, {
'testcase_name':
'includesvg_no_match',
'text_in':
'Foo\\includesvg{test_no_match}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includesvg{test_no_match}\nFoo',
}, {
'testcase_name':
'includesvg_match',
'text_in':
'Foo\\includesvg{test2}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includeinkscape{ext_svg/test2-tex.pdf_tex}\nFoo'
}, {
'testcase_name':
'includesvg_match_with_options',
'text_in':
'Foo\\includesvg[width=\\linewidth]{test2}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includeinkscape[width=\\linewidth]{ext_svg/test2-tex.pdf_tex}\nFoo'
})
def test_replace_includesvg(self, text_in, figures_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._replace_includesvg(text_in, figures_in),
true_output)
@parameterized.named_parameters(*make_search_reference_tests())
def test_search_reference_weak(self, filenames, contents, strict,
true_outputs):
cleaner_outputs = []
for filename in filenames:
reference = arxiv_latex_cleaner._search_reference(filename, contents,
strict)
if reference is not None:
cleaner_outputs.append(filename)
# weak check (passes as long as cleaner includes a superset of the true_output)
for true_output in true_outputs:
self.assertIn(true_output, cleaner_outputs)
@parameterized.named_parameters(*make_search_reference_tests())
def test_search_reference_strong(self, filenames, contents, strict,
true_outputs):
cleaner_outputs = []
for filename in filenames:
reference = arxiv_latex_cleaner._search_reference(filename, contents,
strict)
if reference is not None:
cleaner_outputs.append(filename)
# strong check (set of files must match exactly)
weak_check_result = set(true_outputs).issubset(cleaner_outputs)
if weak_check_result:
msg = 'not fatal, cleaner included more files than necessary'
else:
msg = 'fatal, see test_search_reference_weak'
self.assertEqual(cleaner_outputs, true_outputs, msg)
@parameterized.named_parameters(
{
'testcase_name': 'three_parent',
'filename': 'long/path/to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{path/to/img.ext}',
'{long/path/to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{ \npath/to/img.ext\n}',
'{ \n \nlong/path/to/img.ext\n}',
'{img}',
'{to/img}',
'{path/to/img}',
'{long/path/to/img}',
# dont match
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 12 + [False] * 13
},
{
'testcase_name': 'two_parent',
'filename': 'path/to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{path/to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{ \npath/to/img.ext\n}',
'{img}',
'{to/img}',
'{path/to/img}',
# dont match
'{long/path/to/img.ext}',
'{ \n \nlong/path/to/img.ext\n}',
'{long/path/to/img}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 9 + [False] * 16
},
{
'testcase_name': 'one_parent',
'filename': 'to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{img}',
'{to/img}',
# dont match
'{long/path/to/img}',
'{path/to/img}',
'{ \n \nlong/path/to/img.ext\n}',
'{ \npath/to/img.ext\n}',
'{long/path/to/img.ext}',
'{path/to/img.ext}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 6 + [False] * 19
},
{
'testcase_name': 'two_parent_strict',
'filename': 'path/to/img.ext',
'content_strs': [
# match
'{path/to/img.ext}',
'{ \npath/to/img.ext\n}',
# dont match
'{img.ext}',
'{to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{img}',
'{to/img}',
'{path/to/img}',
'{long/path/to/img.ext}',
'{ \n \nlong/path/to/img.ext\n}',
'{long/path/to/img}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': True,
'true_outputs': [True] * 2 + [False] * 23
},
)
def test_search_reference_filewise(self, filename, content_strs, strict,
true_outputs):
if len(content_strs) != len(true_outputs):
raise ValueError(
"number of true_outputs doesn't match number of content strs")
for content, true_output in zip(content_strs, true_outputs):
reference = arxiv_latex_cleaner._search_reference(filename, content,
strict)
matched = reference is not None
msg_not = ' ' if true_output else ' not '
msg_fmt = 'file {} should' + msg_not + 'have matched latex reference {}'
msg = msg_fmt.format(filename, content)
self.assertEqual(matched, true_output, msg)
class IntegrationTests(parameterized.TestCase):
def setUp(self):
super(IntegrationTests, self).setUp()
self.out_path = 'tex_arXiv'
def _compare_files(self, filename, filename_true):
if path.splitext(filename)[1].lower() in ['.jpg', '.jpeg', '.png']:
with Image.open(filename) as im, Image.open(filename_true) as im_true:
# We check only the sizes of the images, checking pixels would be too
# complicated in case the resize implementations change.
self.assertEqual(
im.size, im_true.size,
          'Image {:s} was not resized properly.'.format(filename))
else:
# Checks if text files are equal without taking in account end of line
# characters.
with open(filename, 'rb') as f:
processed_content = f.read().splitlines()
with open(filename_true, 'rb') as f:
groundtruth_content = f.read().splitlines()
self.assertEqual(
processed_content, groundtruth_content,
'{:s} and {:s} are not equal.'.format(filename, filename_true))
@parameterized.named_parameters(
      {'testcase_name': 'from_dir', 'input_dir': 'tex'},
      {'testcase_name': 'from_zip', 'input_dir': 'tex.zip'},
)
def test_complete(self, input_dir):
out_path_true = 'tex_arXiv_true'
# Make sure the folder does not exist, since we erase it in the test.
if path.isdir(self.out_path):
raise RuntimeError('The folder {:s} should not exist.'.format(
self.out_path))
arxiv_latex_cleaner.run_arxiv_cleaner({
'input_folder': input_dir,
'images_allowlist': {
'images/im2_included.jpg': 200,
'images/im3_included.png': 400,
},
'resize_images': True,
'im_size': 100,
'compress_pdf': False,
'pdf_im_resolution': 500,
'commands_to_delete': ['mytodo'],
'commands_only_to_delete': ['red'],
'environments_to_delete': ['mynote'],
'use_external_tikz': 'ext_tikz',
'keep_bib': False
})
# Checks the set of files is the same as in the true folder.
out_files = set(arxiv_latex_cleaner._list_all_files(self.out_path))
out_files_true = set(arxiv_latex_cleaner._list_all_files(out_path_true))
self.assertEqual(out_files, out_files_true)
# Compares the contents of each file against the true value.
for f1 in out_files:
self._compare_files(
path.join(self.out_path, f1), path.join(out_path_true, f1))
def tearDown(self):
shutil.rmtree(self.out_path)
super(IntegrationTests, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_datasets as tfds
import numpy as np
# A workaround to avoid a crash because tfds may open too many files.
import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
# Adjust depending on the available RAM.
MAX_IN_MEMORY = 200_000
DATASET_SPLITS = {
'cifar10': {'train': 'train[:98%]', 'test': 'test'},
'cifar100': {'train': 'train[:98%]', 'test': 'test'},
'imagenet2012': {'train': 'train[:99%]', 'test': 'validation'},
}
def get_dataset_info(dataset, split, examples_per_class):
data_builder = tfds.builder(dataset)
original_num_examples = data_builder.info.splits[split].num_examples
num_classes = data_builder.info.features['label'].num_classes
if examples_per_class is not None:
num_examples = examples_per_class * num_classes
else:
num_examples = original_num_examples
return {'original_num_examples': original_num_examples,
'num_examples': num_examples,
'num_classes': num_classes}
def sample_subset(data, num_examples, num_classes,
examples_per_class, examples_per_class_seed):
data = data.batch(min(num_examples, MAX_IN_MEMORY))
data = data.as_numpy_iterator().next()
np.random.seed(examples_per_class_seed)
indices = [idx
for c in range(num_classes)
for idx in np.random.choice(np.where(data['label'] == c)[0],
examples_per_class,
replace=False)]
data = {'image': data['image'][indices],
'label': data['label'][indices]}
data = tf.data.Dataset.zip(
(tf.data.Dataset.from_tensor_slices(data['image']),
tf.data.Dataset.from_tensor_slices(data['label'])))
return data.map(lambda x, y: {'image': x, 'label': y},
tf.data.experimental.AUTOTUNE)
def get_data(dataset, mode,
repeats, batch_size,
resize_size, crop_size,
mixup_alpha,
examples_per_class, examples_per_class_seed,
num_devices,
tfds_manual_dir):
split = DATASET_SPLITS[dataset][mode]
dataset_info = get_dataset_info(dataset, split, examples_per_class)
data_builder = tfds.builder(dataset)
data_builder.download_and_prepare(
download_config=tfds.download.DownloadConfig(manual_dir=tfds_manual_dir))
data = data_builder.as_dataset(
split=split,
decoders={'image': tfds.decode.SkipDecoding()})
decoder = data_builder.info.features['image'].decode_example
if (mode == 'train') and (examples_per_class is not None):
data = sample_subset(data,
dataset_info['original_num_examples'],
dataset_info['num_classes'],
examples_per_class, examples_per_class_seed)
def _pp(data):
im = decoder(data['image'])
if mode == 'train':
im = tf.image.resize(im, [resize_size, resize_size])
im = tf.image.random_crop(im, [crop_size, crop_size, 3])
      # Note: a random flip is presumably the intended augmentation here; a
      # plain flip_left_right would deterministically mirror every image.
      im = tf.image.random_flip_left_right(im)
else:
# usage of crop_size here is intentional
im = tf.image.resize(im, [crop_size, crop_size])
im = (im - 127.5) / 127.5
label = tf.one_hot(data['label'], dataset_info['num_classes'])
return {'image': im, 'label': label}
data = data.cache()
data = data.repeat(repeats)
if mode == 'train':
data = data.shuffle(min(dataset_info['num_examples'], MAX_IN_MEMORY))
data = data.map(_pp, tf.data.experimental.AUTOTUNE)
data = data.batch(batch_size, drop_remainder=True)
def _mixup(data):
beta_dist = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
beta = tf.cast(beta_dist.sample([]), tf.float32)
data['image'] = (beta * data['image'] +
(1 - beta) * tf.reverse(data['image'], axis=[0]))
data['label'] = (beta * data['label'] +
(1 - beta) * tf.reverse(data['label'], axis=[0]))
return data
if mixup_alpha is not None and mixup_alpha > 0.0 and mode == 'train':
data = data.map(_mixup, tf.data.experimental.AUTOTUNE)
  # Shard data such that it can be distributed across devices
def _shard(data):
data['image'] = tf.reshape(data['image'],
[num_devices, -1, crop_size, crop_size, 3])
data['label'] = tf.reshape(data['label'],
[num_devices, -1, dataset_info['num_classes']])
return data
if num_devices is not None:
data = data.map(_shard, tf.data.experimental.AUTOTUNE)
return data.prefetch(1)
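
# Illustrative call (a sketch; the parameter values are assumptions for a
# small-image fine-tuning run, not library defaults):
#   train_data = get_data(
#       dataset='cifar10', mode='train', repeats=None, batch_size=128,
#       resize_size=160, crop_size=128, mixup_alpha=0.1,
#       examples_per_class=None, examples_per_class_seed=0,
#       num_devices=None, tfds_manual_dir=None)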
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_resolution(original_resolution):
"""Takes (H,W) and returns (precrop, crop)."""
area = original_resolution[0] * original_resolution[1]
return (160, 128) if area < 96*96 else (512, 480)
known_dataset_sizes = {
'cifar10': (32, 32),
'cifar100': (32, 32),
'oxford_iiit_pet': (224, 224),
'oxford_flowers102': (224, 224),
'imagenet2012': (224, 224),
}
def get_resolution_from_dataset(dataset):
if dataset not in known_dataset_sizes:
raise ValueError(f"Unsupported dataset {dataset}. Add your own here :)")
return get_resolution(known_dataset_sizes[dataset])
def get_mixup(dataset_size):
return 0.0 if dataset_size < 20_000 else 0.1
def get_schedule(dataset_size):
if dataset_size < 20_000:
return [100, 200, 300, 400, 500]
elif dataset_size < 500_000:
return [500, 3000, 6000, 9000, 10_000]
else:
return [500, 6000, 12_000, 18_000, 20_000]
def get_lr(step, dataset_size, base_lr=0.003):
"""Returns learning-rate for `step` or None at the end."""
supports = get_schedule(dataset_size)
# Linear warmup
if step < supports[0]:
return base_lr * step / supports[0]
# End of training
elif step >= supports[-1]:
return None
# Staircase decays by factor of 10
else:
for s in supports[1:]:
if s < step:
base_lr /= 10
return base_lr
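
# Worked example for dataset_size=50_000 (roughly the CIFAR-10 train split):
#   get_schedule(50_000)   -> [500, 3000, 6000, 9000, 10_000]
#   get_lr(250, 50_000)    -> 0.003 * 250 / 500 = 0.0015  (linear warmup)
#   get_lr(4_000, 50_000)  -> 0.003 / 10 = 0.0003         (one staircase decay)
#   get_lr(10_000, 50_000) -> None                        (end of training)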
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding: utf-8
import argparse
import logging
import logging.config
import os
import bit_hyperrule
def argparser(known_models):
parser = argparse.ArgumentParser(description="Fine-tune BiT-M model.")
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring and checkpointing.")
parser.add_argument("--model", choices=list(known_models),
help="Which variant to use; BiT-M gives best results.")
parser.add_argument("--logdir", required=True,
help="Where to log training info (small).")
parser.add_argument("--bit_pretrained_dir", default='.',
help="Where to search for pretrained BiT models.")
parser.add_argument("--dataset", choices=list(bit_hyperrule.known_dataset_sizes.keys()),
help="Choose the dataset. It should be easy to add your own! "
"Don't forget to set --datadir if necessary.")
parser.add_argument("--examples_per_class", type=int, default=None,
help="For the few-shot variant, use this many examples "
"per class only.")
parser.add_argument("--examples_per_class_seed", type=int, default=0,
help="Random seed for selecting examples.")
parser.add_argument("--batch", type=int, default=512,
help="Batch size.")
parser.add_argument("--batch_split", type=int, default=1,
help="Number of batches to compute gradient on before updating weights.")
parser.add_argument("--base_lr", type=float, default=0.003,
help="Base learning-rate for fine-tuning. Most likely default is best.")
parser.add_argument("--eval_every", type=int, default=None,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
return parser
def setup_logger(args):
"""Creates and returns a fancy logger."""
# return logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(message)s")
# Why is setting up proper logging so !@?#! ugly?
os.makedirs(os.path.join(args.logdir, args.name), exist_ok=True)
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
},
},
"handlers": {
"stderr": {
"level": "INFO",
"formatter": "standard",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
"logfile": {
"level": "DEBUG",
"formatter": "standard",
"class": "logging.FileHandler",
"filename": os.path.join(args.logdir, args.name, "train.log"),
"mode": "a",
}
},
"loggers": {
"": {
"handlers": ["stderr", "logfile"],
"level": "DEBUG",
"propagate": True
},
}
})
logger = logging.getLogger(__name__)
logger.flush = lambda: [h.flush() for h in logger.handlers]
logger.info(args)
return logger
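if __name__ == "__main__":
  # Hedged usage sketch (not part of the original file): build the shared
  # argument parser and logger the way the training scripts do; the model
  # name and paths below are placeholders.
  args = argparser(["BiT-M-R50x1"]).parse_args(
      ["--name", "demo", "--logdir", "/tmp/bit_logs", "--model", "BiT-M-R50x1"])
  logger = setup_logger(args)
  logger.info("Messages go to stderr and to %s",
              os.path.join(args.logdir, args.name, "train.log"))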
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.numpy as jnp
import flax.nn as nn
def fixed_padding(x, kernel_size):
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = jax.lax.pad(x, 0.0,
((0, 0, 0),
(pad_beg, pad_end, 0), (pad_beg, pad_end, 0),
(0, 0, 0)))
return x
def standardize(x, axis, eps):
x = x - jnp.mean(x, axis=axis, keepdims=True)
x = x / jnp.sqrt(jnp.mean(jnp.square(x), axis=axis, keepdims=True) + eps)
return x
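# Worked example (illustrative, not in the original file): standardizing the
# rows of [[0., 1., 2.], [3., 5., 7.]] with axis=[1] makes every row
# zero-mean and unit-variance, i.e. both rows become [-1.2247, 0., 1.2247].
# `StdConv` below applies exactly this to its kernel over the H, W and
# input-channel axes (axis=[0, 1, 2]).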
class GroupNorm(nn.Module):
"""Group normalization (arxiv.org/abs/1803.08494)."""
def apply(self, x, num_groups=32):
input_shape = x.shape
group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
x = x.reshape(group_shape)
# Standardize along spatial and group dimensions
x = standardize(x, axis=[1, 2, 4], eps=1e-5)
x = x.reshape(input_shape)
bias_scale_shape = tuple([1, 1, 1] + [input_shape[-1]])
x = x * self.param('scale', bias_scale_shape, nn.initializers.ones)
x = x + self.param('bias', bias_scale_shape, nn.initializers.zeros)
return x
class StdConv(nn.Conv):
def param(self, name, shape, initializer):
param = super().param(name, shape, initializer)
if name == 'kernel':
param = standardize(param, axis=[0, 1, 2], eps=1e-10)
return param
class RootBlock(nn.Module):
def apply(self, x, width):
x = fixed_padding(x, 7)
x = StdConv(x, width, (7, 7), (2, 2),
padding="VALID",
bias=False,
name="conv_root")
x = fixed_padding(x, 3)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding="VALID")
return x
class ResidualUnit(nn.Module):
"""Bottleneck ResNet block."""
def apply(self, x, nout, strides=(1, 1)):
x_shortcut = x
needs_projection = x.shape[-1] != nout * 4 or strides != (1, 1)
group_norm = GroupNorm
conv = StdConv.partial(bias=False)
x = group_norm(x, name="gn1")
x = nn.relu(x)
if needs_projection:
x_shortcut = conv(x, nout * 4, (1, 1), strides, name="conv_proj")
x = conv(x, nout, (1, 1), name="conv1")
x = group_norm(x, name="gn2")
x = nn.relu(x)
x = fixed_padding(x, 3)
x = conv(x, nout, (3, 3), strides, name="conv2", padding='VALID')
x = group_norm(x, name="gn3")
x = nn.relu(x)
x = conv(x, nout * 4, (1, 1), name="conv3")
return x + x_shortcut
class ResidualBlock(nn.Module):
def apply(self, x, block_size, nout, first_stride):
x = ResidualUnit(
x, nout, strides=first_stride,
name="unit01")
for i in range(1, block_size):
x = ResidualUnit(
x, nout, strides=(1, 1),
name=f"unit{i+1:02d}")
return x
class ResNet(nn.Module):
"""ResNetV2."""
def apply(self, x, num_classes=1000,
width_factor=1, num_layers=50):
block_sizes = _block_sizes[num_layers]
width = 64 * width_factor
root_block = RootBlock.partial(width=width)
x = root_block(x, name='root_block')
# Blocks
for i, block_size in enumerate(block_sizes):
x = ResidualBlock(x, block_size, width * 2 ** i,
first_stride=(1, 1) if i == 0 else (2, 2),
name=f"block{i + 1}")
# Pre-head
x = GroupNorm(x, name='norm-pre-head')
x = nn.relu(x)
x = jnp.mean(x, axis=(1, 2))
# Head
x = nn.Dense(x, num_classes, name="conv_head",
kernel_init=nn.initializers.zeros)
return x.astype(jnp.float32)
_block_sizes = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}
KNOWN_MODELS = dict(
[(bit + f'-R{l}x{w}', ResNet.partial(num_layers=l, width_factor=w))
for bit in ['BiT-S', 'BiT-M']
for l, w in [(50, 1), (50, 3), (101, 1), (152, 2), (101, 3), (152, 4)]]
)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
import flax.optim as optim
import flax.jax_utils as flax_utils
import input_pipeline_tf2_or_jax as input_pipeline
import bit_jax.models as models
import bit_jax.tf2jax as tf2jax
import bit_common
import bit_hyperrule
def main(args):
logger = bit_common.setup_logger(args)
logger.info(f'Available devices: {jax.devices()}')
model = models.KNOWN_MODELS[args.model]
  # Load weights of a BiT model
bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.npz')
if not os.path.exists(bit_model_file):
raise FileNotFoundError(
f'Model file is not found in "{args.bit_pretrained_dir}" directory.')
with open(bit_model_file, 'rb') as f:
params_tf = np.load(f)
params_tf = dict(zip(params_tf.keys(), params_tf.values()))
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(
args.dataset)
# Setup input pipeline
dataset_info = input_pipeline.get_dataset_info(
args.dataset, 'train', args.examples_per_class)
data_train = input_pipeline.get_data(
dataset=args.dataset,
mode='train',
repeats=None, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=args.examples_per_class,
examples_per_class_seed=args.examples_per_class_seed,
mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
num_devices=jax.local_device_count(),
tfds_manual_dir=args.tfds_manual_dir)
logger.info(data_train)
data_test = input_pipeline.get_data(
dataset=args.dataset,
mode='test',
repeats=1, batch_size=args.batch_eval,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=None, examples_per_class_seed=0,
mixup_alpha=None,
num_devices=jax.local_device_count(),
tfds_manual_dir=args.tfds_manual_dir)
logger.info(data_test)
# Build ResNet architecture
ResNet = model.partial(num_classes=dataset_info['num_classes'])
_, params = ResNet.init_by_shape(
jax.random.PRNGKey(0),
[([1, crop_size, crop_size, 3], jnp.float32)])
resnet_fn = ResNet.call
# pmap replicates the models over all GPUs
resnet_fn_repl = jax.pmap(ResNet.call)
def cross_entropy_loss(*, logits, labels):
logp = jax.nn.log_softmax(logits)
return -jnp.mean(jnp.sum(logp * labels, axis=1))
def loss_fn(params, images, labels):
logits = resnet_fn(params, images)
return cross_entropy_loss(logits=logits, labels=labels)
# Update step, replicated over all GPUs
@partial(jax.pmap, axis_name='batch')
def update_fn(opt, lr, batch):
l, g = jax.value_and_grad(loss_fn)(opt.target,
batch['image'],
batch['label'])
g = jax.tree_map(lambda x: jax.lax.pmean(x, axis_name='batch'), g)
opt = opt.apply_gradient(g, learning_rate=lr)
return opt
  # In-place update of randomly initialized weights with BiT weights
tf2jax.transform_params(params, params_tf,
num_classes=dataset_info['num_classes'])
# Create optimizer and replicate it over all GPUs
opt = optim.Momentum(beta=0.9).create(params)
opt_repl = flax_utils.replicate(opt)
  # Delete references to the objects that are not needed anymore
del opt
del params
total_steps = bit_hyperrule.get_schedule(dataset_info['num_examples'])[-1]
# Run training loop
for step, batch in zip(range(1, total_steps + 1),
data_train.as_numpy_iterator()):
lr = bit_hyperrule.get_lr(step - 1,
dataset_info['num_examples'],
args.base_lr)
opt_repl = update_fn(opt_repl, flax_utils.replicate(lr), batch)
# Run eval step
if ((args.eval_every and step % args.eval_every == 0)
or (step == total_steps)):
accuracy_test = np.mean([
c
for batch in data_test.as_numpy_iterator()
for c in (
np.argmax(resnet_fn_repl(opt_repl.target, batch['image']), axis=2) ==
np.argmax(batch['label'], axis=2)).ravel()])
logger.info(
f'Step: {step}, '
f'learning rate: {lr:.07f}, '
f'Test accuracy: {accuracy_test:0.3f}')
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--tfds_manual_dir", default=None,
help="Path to maually downloaded dataset.")
parser.add_argument("--batch_eval", default=32, type=int,
help="Eval batch size.")
main(parser.parse_args())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import re
def transform_params(params, params_tf, num_classes):
# BiT and JAX models have different naming conventions, so we need to
# properly map TF weights to JAX weights
params['root_block']['conv_root']['kernel'] = (
params_tf['resnet/root_block/standardized_conv2d/kernel'])
for block in ['block1', 'block2', 'block3', 'block4']:
units = set([re.findall(r'unit\d+', p)[0] for p in params_tf.keys()
if p.find(block) >= 0])
for unit in units:
for i, group in enumerate(['a', 'b', 'c']):
params[block][unit][f'conv{i+1}']['kernel'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'standardized_conv2d/kernel'])
params[block][unit][f'gn{i+1}']['bias'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'group_norm/beta'][None, None, None])
params[block][unit][f'gn{i+1}']['scale'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'group_norm/gamma'][None, None, None])
projs = [p for p in params_tf.keys()
if p.find(f'{block}/{unit}/a/proj') >= 0]
assert len(projs) <= 1
if projs:
params[block][unit]['conv_proj']['kernel'] = params_tf[projs[0]]
params['norm-pre-head']['bias'] = (
params_tf['resnet/group_norm/beta'][None, None, None])
params['norm-pre-head']['scale'] = (
params_tf['resnet/group_norm/gamma'][None, None, None])
params['conv_head']['kernel'] = np.zeros(
(params['conv_head']['kernel'].shape[0], num_classes), dtype=np.float32)
params['conv_head']['bias'] = np.zeros(num_classes, dtype=np.float32)
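# Example of the name mapping (illustrative, not in the original file): the TF
# checkpoint entry 'resnet/block1/unit01/a/standardized_conv2d/kernel' lands in
# params['block1']['unit01']['conv1']['kernel'], while the group-norm beta and
# gamma pick up three leading singleton axes ([None, None, None]) so that they
# broadcast over the N, H, W dimensions of NHWC activations.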
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Bottleneck ResNet v2 with GroupNorm and Weight Standardization."""
from collections import OrderedDict # pylint: disable=g-importing-member
import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride,
padding=1, bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride,
padding=0, bias=bias)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
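# Quick shape sketch (illustrative, not in the original file): a TF kernel of
# shape (H, W, I, O) = (3, 3, 64, 256) comes back as a torch tensor of shape
# (O, I, H, W) = (256, 64, 3, 3); 1-d tensors such as GroupNorm gamma/beta
# pass through unchanged.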
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout//4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride) # Original code has it on conv1!!
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if (stride != 1 or cin != cout):
# Projection also with pre-activation according to paper.
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
# Residual branch
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
# Unit's branch
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
super().__init__()
wf = width_factor # shortcut 'cause we'll use it a lot.
# The following will be unreadable if we split lines.
# pylint: disable=line-too-long
self.root = nn.Sequential(OrderedDict([
('conv', StdConv2d(3, 64*wf, kernel_size=7, stride=2, padding=3, bias=False)),
('pad', nn.ConstantPad2d(1, 0)),
('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
# The following is subtly not the same!
# ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
self.body = nn.Sequential(OrderedDict([
('block1', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=64*wf, cout=256*wf, cmid=64*wf))] +
[(f'unit{i:02d}', PreActBottleneck(cin=256*wf, cout=256*wf, cmid=64*wf)) for i in range(2, block_units[0] + 1)],
))),
('block2', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=256*wf, cout=512*wf, cmid=128*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=512*wf, cout=512*wf, cmid=128*wf)) for i in range(2, block_units[1] + 1)],
))),
('block3', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=512*wf, cout=1024*wf, cmid=256*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=1024*wf, cout=1024*wf, cmid=256*wf)) for i in range(2, block_units[2] + 1)],
))),
('block4', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=1024*wf, cout=2048*wf, cmid=512*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=2048*wf, cout=2048*wf, cmid=512*wf)) for i in range(2, block_units[3] + 1)],
))),
]))
# pylint: enable=line-too-long
self.zero_head = zero_head
self.head = nn.Sequential(OrderedDict([
('gn', nn.GroupNorm(32, 2048*wf)),
('relu', nn.ReLU(inplace=True)),
('avg', nn.AdaptiveAvgPool2d(output_size=1)),
('conv', nn.Conv2d(2048*wf, head_size, kernel_size=1, bias=True)),
]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1) # We should have no spatial shape left.
return x[...,0,0]
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) # pylint: disable=line-too-long
self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel'])) # pylint: disable=line-too-long
self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
KNOWN_MODELS = OrderedDict([
('BiT-M-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-M-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-M-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-M-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-M-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-M-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
('BiT-S-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-S-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-S-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-S-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-S-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-S-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
])
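if __name__ == "__main__":
  # Hedged smoke test (not part of the original file): build the smallest
  # variant with a fresh 10-class zero-initialized head and run a dummy
  # forward pass.
  model = KNOWN_MODELS['BiT-M-R50x1'](head_size=10, zero_head=True)
  with torch.no_grad():
    print(model(torch.zeros(2, 3, 128, 128)).shape)  # torch.Size([2, 10])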
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Various utilities from my toolbox at github.com/lucasb-eyer/lbtoolbox."""
import collections
import json
import signal
import time
import numpy as np
class Uninterrupt:
"""Context manager to gracefully handle interrupts.
Use as:
with Uninterrupt() as u:
while not u.interrupted:
# train
"""
def __init__(self, sigs=(signal.SIGINT, signal.SIGTERM), verbose=False):
self.sigs = sigs
self.verbose = verbose
self.interrupted = False
self.orig_handlers = None
def __enter__(self):
if self.orig_handlers is not None:
raise ValueError("Can only enter `Uninterrupt` once!")
self.interrupted = False
self.orig_handlers = [signal.getsignal(sig) for sig in self.sigs]
def handler(signum, frame):
del signum # unused
del frame # unused
self.release()
self.interrupted = True
if self.verbose:
print("Interruption scheduled...", flush=True)
for sig in self.sigs:
signal.signal(sig, handler)
return self
def __exit__(self, type_, value, tb):
self.release()
def release(self):
if self.orig_handlers is not None:
for sig, orig in zip(self.sigs, self.orig_handlers):
signal.signal(sig, orig)
self.orig_handlers = None
class Timer:
"""Context timing its scope."""
def __init__(self, donecb):
self.cb = donecb
def __enter__(self):
self.t0 = time.time()
def __exit__(self, exc_type, exc_value, traceback):
t = time.time() - self.t0
self.cb(t)
class Chrono:
"""Chronometer for poor-man's (but convenient!) profiling."""
def __init__(self):
self.timings = collections.OrderedDict()
def measure(self, what):
return Timer(lambda t: self._done(what, t))
def _done(self, what, t):
self.timings.setdefault(what, []).append(t)
def times(self, what):
return self.timings[what]
def avgtime(self, what, dropfirst=False):
timings = self.timings[what]
if dropfirst and len(timings) > 1:
timings = timings[1:]
return sum(timings)/len(timings)
def __str__(self, fmt="{:{w}.5f}", dropfirst=False):
avgtimes = {k: self.avgtime(k, dropfirst) for k in self.timings}
l = max(map(len, avgtimes))
w = max(len(fmt.format(v, w=0)) for v in avgtimes.values())
avg_by_time = sorted(avgtimes.items(), key=lambda t: t[1], reverse=True)
return "\n".join(f"{name:{l}s}: " + fmt.format(t, w=w) + "s"
for name, t in avg_by_time)
def create_dat(basename, dtype, shape, fillvalue=None, **meta):
"""Creates mem-mapped numpy array plus metadata.
Creates a data file at `basename` and returns a writeable mem-map backed
numpy array to it. Can also be passed any json-serializable keys and values
in `meta`.
"""
xm = np.memmap(basename, mode="w+", dtype=dtype, shape=shape)
xa = np.ndarray.__new__(np.ndarray, dtype=dtype, shape=shape, buffer=xm)
# xa.flush = xm.flush # Sadly, we can't just add attributes to a numpy array, need to subclass it.
if fillvalue is not None:
xa.fill(fillvalue)
# xa.flush()
xm.flush()
meta.setdefault("dtype", np.dtype(dtype).str)
meta.setdefault("shape", shape)
json.dump(meta, open(basename + ".json", "w+"))
return xa
def load_dat(basename, mode="r"):
"""Loads file created via `create_dat` as mem-mapped numpy array.
Returns a read-only mem-mapped numpy array to file at `basename`.
If `mode` is set to `'r+'`, the data can be written, too.
"""
desc = json.load(open(basename + ".json", "r"))
dtype, shape = desc["dtype"], desc["shape"]
xm = np.memmap(basename, mode=mode, dtype=dtype, shape=shape)
xa = np.ndarray.__new__(np.ndarray, dtype=dtype, shape=shape, buffer=xm)
# xa.flush = xm.flush # Sadly, we can't just add attributes to a numpy array, need to subclass it.
return xa
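if __name__ == "__main__":
  # Hedged demo (not part of the original file): time a toy workload with
  # `Chrono` and print the per-name average timings.
  chrono = Chrono()
  for _ in range(3):
    with chrono.measure("sleep"):
      time.sleep(0.01)
  with chrono.measure("noop"):
    pass
  print(chrono)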
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fine-tune a BiT model on some downstream dataset."""
#!/usr/bin/env python3
# coding: utf-8
from os.path import join as pjoin # pylint: disable=g-importing-member
import time
import numpy as np
import torch
import torchvision as tv
import bit_pytorch.fewshot as fs
import bit_pytorch.lbtoolbox as lb
import bit_pytorch.models as models
import bit_common
import bit_hyperrule
def topk(output, target, ks=(1,)):
"""Returns one boolean vector for each k, whether the target is within the output's top-k."""
_, pred = output.topk(max(ks), 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [correct[:k].max(0)[0] for k in ks]
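# Worked example (illustrative, not in the original file):
#   output = torch.tensor([[0.1, 0.2, 0.7], [0.8, 0.15, 0.05]])
#   topk(output, torch.tensor([2, 1]), ks=(1, 2))
# gives top-1 correctness [True, False] and top-2 correctness [True, True]:
# class 1 is only the runner-up prediction for the second sample.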
def recycle(iterable):
"""Variant of itertools.cycle that does not save iterates."""
while True:
for i in iterable:
yield i
def mktrainval(args, logger):
"""Returns train and validation datasets."""
precrop, crop = bit_hyperrule.get_resolution_from_dataset(args.dataset)
train_tx = tv.transforms.Compose([
tv.transforms.Resize((precrop, precrop)),
tv.transforms.RandomCrop((crop, crop)),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
val_tx = tv.transforms.Compose([
tv.transforms.Resize((crop, crop)),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
if args.dataset == "cifar10":
train_set = tv.datasets.CIFAR10(args.datadir, transform=train_tx, train=True, download=True)
valid_set = tv.datasets.CIFAR10(args.datadir, transform=val_tx, train=False, download=True)
elif args.dataset == "cifar100":
train_set = tv.datasets.CIFAR100(args.datadir, transform=train_tx, train=True, download=True)
valid_set = tv.datasets.CIFAR100(args.datadir, transform=val_tx, train=False, download=True)
elif args.dataset == "imagenet2012":
train_set = tv.datasets.ImageFolder(pjoin(args.datadir, "train"), train_tx)
valid_set = tv.datasets.ImageFolder(pjoin(args.datadir, "val"), val_tx)
else:
raise ValueError(f"Sorry, we have not spent time implementing the "
f"{args.dataset} dataset in the PyTorch codebase. "
f"In principle, it should be easy to add :)")
if args.examples_per_class is not None:
logger.info(f"Looking for {args.examples_per_class} images per class...")
indices = fs.find_fewshot_indices(train_set, args.examples_per_class)
train_set = torch.utils.data.Subset(train_set, indices=indices)
logger.info(f"Using a training set with {len(train_set)} images.")
logger.info(f"Using a validation set with {len(valid_set)} images.")
micro_batch_size = args.batch // args.batch_split
valid_loader = torch.utils.data.DataLoader(
valid_set, batch_size=micro_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True, drop_last=False)
if micro_batch_size <= len(train_set):
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=micro_batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=False)
else:
# In the few-shot cases, the total dataset size might be smaller than the batch-size.
# In these cases, the default sampler doesn't repeat, so we need to make it do that
# if we want to match the behaviour from the paper.
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=micro_batch_size, num_workers=args.workers, pin_memory=True,
sampler=torch.utils.data.RandomSampler(train_set, replacement=True, num_samples=micro_batch_size))
return train_set, valid_set, train_loader, valid_loader
def run_eval(model, data_loader, device, chrono, logger, step):
# switch to evaluate mode
model.eval()
logger.info("Running validation...")
logger.flush()
all_c, all_top1, all_top5 = [], [], []
end = time.time()
for b, (x, y) in enumerate(data_loader):
with torch.no_grad():
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# measure data loading time
chrono._done("eval load", time.time() - end)
# compute output, measure accuracy and record loss.
with chrono.measure("eval fprop"):
logits = model(x)
c = torch.nn.CrossEntropyLoss(reduction='none')(logits, y)
top1, top5 = topk(logits, y, ks=(1, 5))
all_c.extend(c.cpu()) # Also ensures a sync point.
all_top1.extend(top1.cpu())
all_top5.extend(top5.cpu())
# measure elapsed time
end = time.time()
model.train()
logger.info(f"Validation@{step} loss {np.mean(all_c):.5f}, "
f"top1 {np.mean(all_top1):.2%}, "
f"top5 {np.mean(all_top5):.2%}")
logger.flush()
return all_c, all_top1, all_top5
def mixup_data(x, y, l):
"""Returns mixed inputs, pairs of targets, and lambda"""
indices = torch.randperm(x.shape[0]).to(x.device)
mixed_x = l * x + (1 - l) * x[indices]
y_a, y_b = y, y[indices]
return mixed_x, y_a, y_b
def mixup_criterion(criterion, pred, y_a, y_b, l):
return l * criterion(pred, y_a) + (1 - l) * criterion(pred, y_b)
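# Worked example (illustrative, not in the original file): with l = 0.3,
# `mixup_data` blends each image with a permuted partner,
#   mixed_x = 0.3 * x + 0.7 * x[perm],
# and `mixup_criterion` mixes the two losses with the same coefficient,
#   0.3 * criterion(pred, y) + 0.7 * criterion(pred, y[perm]).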
def main(args):
logger = bit_common.setup_logger(args)
# Lets cuDNN benchmark conv implementations and choose the fastest.
# Only good if sizes stay the same within the main loop!
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
logger.info(f"Going to train on {device}")
train_set, valid_set, train_loader, valid_loader = mktrainval(args, logger)
logger.info(f"Loading model from {args.model}.npz")
model = models.KNOWN_MODELS[args.model](head_size=len(valid_set.classes), zero_head=True)
model.load_from(np.load(f"{args.model}.npz"))
logger.info("Moving model onto all GPUs")
model = torch.nn.DataParallel(model)
# Optionally resume from a checkpoint.
# Load it to CPU first as we'll move the model to GPU later.
# This way, we save a little bit of GPU memory when loading.
step = 0
# Note: no weight-decay!
optim = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
# Resume fine-tuning if we find a saved model.
savename = pjoin(args.logdir, args.name, "bit.pth.tar")
try:
logger.info(f"Model will be saved in '{savename}'")
checkpoint = torch.load(savename, map_location="cpu")
logger.info(f"Found saved model to resume from at '{savename}'")
step = checkpoint["step"]
model.load_state_dict(checkpoint["model"])
optim.load_state_dict(checkpoint["optim"])
logger.info(f"Resumed at step {step}")
except FileNotFoundError:
logger.info("Fine-tuning from BiT")
model = model.to(device)
optim.zero_grad()
model.train()
mixup = bit_hyperrule.get_mixup(len(train_set))
cri = torch.nn.CrossEntropyLoss().to(device)
logger.info("Starting training!")
chrono = lb.Chrono()
accum_steps = 0
mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
end = time.time()
with lb.Uninterrupt() as u:
for x, y in recycle(train_loader):
# measure data loading time, which is spent in the `for` statement.
chrono._done("load", time.time() - end)
if u.interrupted:
break
# Schedule sending to GPU(s)
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# Update learning-rate, including stop training if over.
lr = bit_hyperrule.get_lr(step, len(train_set), args.base_lr)
if lr is None:
break
for param_group in optim.param_groups:
param_group["lr"] = lr
if mixup > 0.0:
x, y_a, y_b = mixup_data(x, y, mixup_l)
# compute output
with chrono.measure("fprop"):
logits = model(x)
if mixup > 0.0:
c = mixup_criterion(cri, logits, y_a, y_b, mixup_l)
else:
c = cri(logits, y)
c_num = float(c.data.cpu().numpy()) # Also ensures a sync point.
# Accumulate grads
with chrono.measure("grads"):
(c / args.batch_split).backward()
accum_steps += 1
accstep = f" ({accum_steps}/{args.batch_split})" if args.batch_split > 1 else ""
logger.info(f"[step {step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})") # pylint: disable=logging-format-interpolation
logger.flush()
# Update params
if accum_steps == args.batch_split:
with chrono.measure("update"):
optim.step()
optim.zero_grad()
step += 1
accum_steps = 0
# Sample new mixup ratio for next batch
mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
# Run evaluation and save the model.
if args.eval_every and step % args.eval_every == 0:
run_eval(model, valid_loader, device, chrono, logger, step)
if args.save:
torch.save({
"step": step,
"model": model.state_dict(),
"optim" : optim.state_dict(),
}, savename)
end = time.time()
# Final eval at end of training.
run_eval(model, valid_loader, device, chrono, logger, step='end')
logger.info(f"Timings:\n{chrono}")
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--datadir", required=True,
help="Path to the ImageNet data folder, preprocessed for torchvision.")
parser.add_argument("--workers", type=int, default=8,
help="Number of background threads used to load data.")
parser.add_argument("--no-save", dest="save", action="store_false")
main(parser.parse_args())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility to find k-shot dataset indices, outputs the indices on stdout."""
#!/usr/bin/env python3
# coding: utf-8
from collections import *
from functools import *
import random
import sys
import torch
import torchvision as tv
class AddIndexIter(torch.utils.data.dataloader._SingleProcessDataLoaderIter):
def _next_data(self):
index = self._next_index() # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = torch.utils.data._utils.pin_memory.pin_memory(data)
return index, data
def find_indices_loader(loader, n_shots, n_classes):
per_label_indices = defaultdict(partial(deque, maxlen=n_shots))
for ibatch, (indices, (images, labels)) in enumerate(AddIndexIter(loader)):
for idx, lbl in zip(indices, labels):
per_label_indices[lbl.item()].append(idx)
findings = sum(map(len, per_label_indices.values()))
if findings == n_shots * n_classes:
return per_label_indices
raise RuntimeError("Unable to find enough examples!")
def find_fewshot_indices(dataset, n_shots):
n_classes = len(dataset.classes)
orig_transform = dataset.transform
dataset.transform = tv.transforms.Compose([
tv.transforms.CenterCrop(1),
tv.transforms.ToTensor()
])
# TODO(lbeyer): if dataset isinstance DatasetFolder, we can (maybe?) do much better!
loader = torch.utils.data.DataLoader(dataset, batch_size=1024, shuffle=True, num_workers=0)
per_label_indices = find_indices_loader(loader, n_shots, n_classes)
all_indices = [i for indices in per_label_indices.values() for i in indices]
random.shuffle(all_indices)
dataset.transform = orig_transform
return all_indices
if __name__ == "__main__":
  # Note: the transform is replaced (and restored) inside find_fewshot_indices,
  # so no transform needs to be passed here.
  dataset = tv.datasets.ImageFolder(sys.argv[2])
all_indices = find_fewshot_indices(dataset, int(sys.argv[1]))
for i in all_indices:
print(i)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet architecture as used in BiT."""
import tensorflow.compat.v2 as tf
from . import normalization
def add_name_prefix(name, prefix=None):
return prefix + "/" + name if prefix else name
class ReLU(tf.keras.layers.ReLU):
def compute_output_shape(self, input_shape):
return tf.TensorShape(input_shape)
class PaddingFromKernelSize(tf.keras.layers.Layer):
"""Layer that adds padding to an image taking into a given kernel size."""
def __init__(self, kernel_size, **kwargs):
super(PaddingFromKernelSize, self).__init__(**kwargs)
pad_total = kernel_size - 1
self._pad_beg = pad_total // 2
self._pad_end = pad_total - self._pad_beg
def compute_output_shape(self, input_shape):
batch_size, height, width, channels = tf.TensorShape(input_shape).as_list()
if height is not None:
height = height + self._pad_beg + self._pad_end
if width is not None:
width = width + self._pad_beg + self._pad_end
return tf.TensorShape((batch_size, height, width, channels))
def call(self, x):
padding = [
[0, 0],
[self._pad_beg, self._pad_end],
[self._pad_beg, self._pad_end],
[0, 0]]
return tf.pad(x, padding)
class StandardizedConv2D(tf.keras.layers.Conv2D):
"""Implements the abs/1903.10520 technique (see go/dune-gn).
  You can simply replace any Conv2D with this one to use a re-parametrized
  convolution operation in which the kernels are standardized before the conv.
Note that it does not come with extra learnable scale/bias parameters,
as those used in "Weight normalization" (abs/1602.07868). This does not
matter if combined with BN/GN/..., but it would matter if the convolution
was used standalone.
Author: Lucas Beyer
"""
def build(self, input_shape):
super(StandardizedConv2D, self).build(input_shape)
# Wrap a standardization around the conv OP.
default_conv_op = self._convolution_op
def standardized_conv_op(inputs, kernel):
# Kernel has shape HWIO, normalize over HWI
mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
# Author code uses std + 1e-5
return default_conv_op(inputs, (kernel - mean) / tf.sqrt(var + 1e-10))
self._convolution_op = standardized_conv_op
self.built = True
class BottleneckV2Unit(tf.keras.layers.Layer):
"""Implements a standard ResNet's unit (version 2).
"""
def __init__(self, num_filters, stride=1, **kwargs):
"""Initializer.
Args:
num_filters: number of filters in the bottleneck.
stride: specifies block's stride.
**kwargs: other tf.keras.layers.Layer keyword arguments.
"""
super(BottleneckV2Unit, self).__init__(**kwargs)
self._num_filters = num_filters
self._stride = stride
self._proj = None
self._unit_a = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
], name="a")
self._unit_a_conv = StandardizedConv2D(
filters=num_filters,
kernel_size=1,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="a/standardized_conv2d")
self._unit_b = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
PaddingFromKernelSize(kernel_size=3),
StandardizedConv2D(
filters=num_filters,
kernel_size=3,
strides=stride,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="standardized_conv2d")
], name="b")
self._unit_c = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
StandardizedConv2D(
filters=4 * num_filters,
kernel_size=1,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="standardized_conv2d")
], name="c")
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
# Add projection layer if necessary.
if (self._stride > 1) or (4 * self._num_filters != input_shape[-1]):
self._proj = StandardizedConv2D(
filters=4 * self._num_filters,
kernel_size=1,
strides=self._stride,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="a/proj/standardized_conv2d")
self.built = True
def compute_output_shape(self, input_shape):
current_shape = self._unit_a.compute_output_shape(input_shape)
current_shape = self._unit_a_conv.compute_output_shape(current_shape)
current_shape = self._unit_b.compute_output_shape(current_shape)
current_shape = self._unit_c.compute_output_shape(current_shape)
return current_shape
def call(self, x):
x_shortcut = x
# Unit "a".
x = self._unit_a(x)
if self._proj is not None:
x_shortcut = self._proj(x)
x = self._unit_a_conv(x)
# Unit "b".
x = self._unit_b(x)
# Unit "c".
x = self._unit_c(x)
return x + x_shortcut
class ResnetV2(tf.keras.Model):
"""Generic ResnetV2 architecture, as used in the BiT paper."""
def __init__(self,
num_units=(3, 4, 6, 3),
num_outputs=1000,
filters_factor=4,
strides=(1, 2, 2, 2),
**kwargs):
super(ResnetV2, self).__init__(**kwargs)
num_blocks = len(num_units)
num_filters = tuple(16 * filters_factor * 2**b for b in range(num_blocks))
self._root = self._create_root_block(num_filters=num_filters[0])
self._blocks = []
for b, (f, u, s) in enumerate(zip(num_filters, num_units, strides), 1):
n = "block{}".format(b)
self._blocks.append(
self._create_block(num_units=u, num_filters=f, stride=s, name=n))
self._pre_head = [
normalization.GroupNormalization(name="group_norm"),
ReLU(),
tf.keras.layers.GlobalAveragePooling2D()
]
self._head = None
if num_outputs:
self._head = tf.keras.layers.Dense(
units=num_outputs,
use_bias=True,
kernel_initializer="zeros",
trainable=self.trainable,
name="head/dense")
def _create_root_block(self,
num_filters,
conv_size=7,
conv_stride=2,
pool_size=3,
pool_stride=2):
layers = [
PaddingFromKernelSize(conv_size),
StandardizedConv2D(
filters=num_filters,
kernel_size=conv_size,
strides=conv_stride,
trainable=self.trainable,
use_bias=False,
name="standardized_conv2d"),
PaddingFromKernelSize(pool_size),
tf.keras.layers.MaxPool2D(
pool_size=pool_size, strides=pool_stride, padding="valid")
]
return tf.keras.Sequential(layers, name="root_block")
def _create_block(self, num_units, num_filters, stride, name):
layers = []
for i in range(1, num_units + 1):
layers.append(
BottleneckV2Unit(
num_filters=num_filters,
stride=(stride if i == 1 else 1),
name="unit%02d" % i))
return tf.keras.Sequential(layers, name=name)
def compute_output_shape(self, input_shape):
current_shape = self._root.compute_output_shape(input_shape)
for block in self._blocks:
current_shape = block.compute_output_shape(current_shape)
for layer in self._pre_head:
current_shape = layer.compute_output_shape(current_shape)
if self._head is not None:
batch_size, features = current_shape.as_list()
current_shape = (batch_size, 1, 1, features)
current_shape = self._head.compute_output_shape(current_shape).as_list()
current_shape = (current_shape[0], current_shape[3])
return tf.TensorShape(current_shape)
def call(self, x):
x = self._root(x)
for block in self._blocks:
x = block(x)
for layer in self._pre_head:
x = layer(x)
if self._head is not None:
x = self._head(x)
return x
KNOWN_MODELS = {
f'{bit}-R{l}x{w}': f'gs://bit_models/{bit}-R{l}x{w}.h5'
for bit in ['BiT-S', 'BiT-M']
for l, w in [(50, 1), (50, 3), (101, 1), (101, 3), (152, 4)]
}
NUM_UNITS = {
k: (3, 4, 6, 3) if 'R50' in k else
(3, 4, 23, 3) if 'R101' in k else
(3, 8, 36, 3)
for k in KNOWN_MODELS
}
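if __name__ == "__main__":
  # Hedged smoke test (not part of the original file): instantiate an
  # R50x1-sized backbone with a small 10-way head and check the output shape.
  # filters_factor=4 corresponds to width multiplier x1 (16 * 4 = 64 root
  # filters), matching how the fine-tuning script derives it from the name.
  model = ResnetV2(num_units=NUM_UNITS['BiT-M-R50x1'], num_outputs=10,
                   filters_factor=4, name="resnet")
  model.build((None, 128, 128, 3))
  print(model(tf.zeros([1, 128, 128, 3])).shape)  # (1, 10)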
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding: utf-8
from functools import partial
import time
import os
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import bit_common
import bit_hyperrule
import bit_tf2.models as models
import input_pipeline_tf2_or_jax as input_pipeline
def reshape_for_keras(features, batch_size, crop_size):
features["image"] = tf.reshape(features["image"], (batch_size, crop_size, crop_size, 3))
features["label"] = tf.reshape(features["label"], (batch_size, -1))
return (features["image"], features["label"])
class BiTLRSched(tf.keras.callbacks.Callback):
def __init__(self, base_lr, num_samples):
self.step = 0
self.base_lr = base_lr
self.num_samples = num_samples
def on_train_batch_begin(self, batch, logs=None):
lr = bit_hyperrule.get_lr(self.step, self.num_samples, self.base_lr)
tf.keras.backend.set_value(self.model.optimizer.lr, lr)
self.step += 1
def main(args):
tf.io.gfile.makedirs(args.logdir)
logger = bit_common.setup_logger(args)
logger.info(f'Available devices: {tf.config.list_physical_devices()}')
tf.io.gfile.makedirs(args.bit_pretrained_dir)
bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.h5')
if not tf.io.gfile.exists(bit_model_file):
model_url = models.KNOWN_MODELS[args.model]
logger.info(f'Downloading the model from {model_url}...')
tf.io.gfile.copy(model_url, bit_model_file)
# Set up input pipeline
dataset_info = input_pipeline.get_dataset_info(
args.dataset, 'train', args.examples_per_class)
# Distribute training
strategy = tf.distribute.MirroredStrategy()
num_devices = strategy.num_replicas_in_sync
print('Number of devices: {}'.format(num_devices))
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(args.dataset)
data_train = input_pipeline.get_data(
dataset=args.dataset, mode='train',
repeats=None, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=args.examples_per_class,
examples_per_class_seed=args.examples_per_class_seed,
mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_test = input_pipeline.get_data(
dataset=args.dataset, mode='test',
repeats=1, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=1, examples_per_class_seed=0,
mixup_alpha=None,
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_train = data_train.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
data_test = data_test.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
with strategy.scope():
filters_factor = int(args.model[-1])*4
model = models.ResnetV2(
num_units=models.NUM_UNITS[args.model],
num_outputs=21843,
filters_factor=filters_factor,
name="resnet",
trainable=True,
dtype=tf.float32)
model.build((None, None, None, 3))
    logger.info('Loading weights...')
    model.load_weights(bit_model_file)
    logger.info('Weights loaded into model!')
model._head = tf.keras.layers.Dense(
units=dataset_info['num_classes'],
use_bias=True,
kernel_initializer="zeros",
trainable=True,
name="head/dense")
lr_supports = bit_hyperrule.get_schedule(dataset_info['num_examples'])
schedule_length = lr_supports[-1]
# NOTE: Let's not do that unless verified necessary and we do the same
# across all three codebases.
# schedule_length = schedule_length * 512 / args.batch
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
    logger.info('Fine-tuning the model...')
steps_per_epoch = args.eval_every or schedule_length
history = model.fit(
data_train,
steps_per_epoch=steps_per_epoch,
epochs=schedule_length // steps_per_epoch,
      validation_data=data_test,  # only used to evaluate our performance
callbacks=[BiTLRSched(args.base_lr, dataset_info['num_examples'])],
)
for epoch, accu in enumerate(history.history['val_accuracy']):
logger.info(
f'Step: {epoch * args.eval_every}, '
f'Test accuracy: {accu:0.3f}')
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--tfds_manual_dir", default=None,
help="Path to maually downloaded dataset.")
parser.add_argument("--batch_eval", default=32, type=int,
help="Eval batch size.")
main(parser.parse_args())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group normalization."""
import tensorflow.compat.v2 as tf
def group_normalize(x, gamma, beta, num_groups=None, group_size=None, eps=1e-5):
"""Applies group-normalization to NHWC `x` (see abs/1803.08494, go/dune-gn).
This function just does the math, if you want a "layer" that creates the
necessary variables etc., see `group_norm` below.
You must either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
Args:
x: N..C-tensor, the input to group-normalize. For images, this would be a
NHWC-tensor, for time-series a NTC, for videos a NHWTC or NTHWC, all of
them work, as normalization includes everything between N and C. Even just
NC shape works, as C is grouped and normalized.
gamma: tensor with C entries, learnable scale after normalization.
beta: tensor with C entries, learnable bias after normalization.
num_groups: int, number of groups to normalize over (divides C).
group_size: int, size of the groups to normalize over (divides C).
eps: float, a small additive constant to avoid /sqrt(0).
Returns:
Group-normalized `x`, of the same shape and type as `x`.
Author: Lucas Beyer
"""
assert x.shape.ndims >= 2, (
"Less than 2-dim Tensor passed to GroupNorm. Something's fishy.")
num_channels = x.shape[-1]
assert num_channels is not None, "Cannot apply GroupNorm on dynamic channels."
assert (num_groups is None) != (group_size is None), (
"You must specify exactly one of `num_groups`, `group_size`")
if group_size is not None:
num_groups = num_channels // group_size
assert num_channels % num_groups == 0, (
"GroupNorm: {} not divisible by {}".format(num_channels, num_groups))
orig_shape = tf.shape(x)
# This shape is NHWGS where G is #groups and S is group-size.
extra_shape = [num_groups, num_channels // num_groups]
group_shape = tf.concat([orig_shape[:-1], extra_shape], axis=-1)
x = tf.reshape(x, group_shape)
# The dimensions to normalize over: HWS for images, but more generally all
# dimensions except N (batch, first) and G (cross-groups, next-to-last).
# So more visually, normdims are the dots in N......G. (note the last one is
# also a dot, not a full-stop, argh!)
normdims = list(range(1, x.shape.ndims - 2)) + [x.shape.ndims - 1]
mean, var = tf.nn.moments(x, normdims, keepdims=True)
# Interestingly, we don't have a beta/gamma per group, but still one per
# channel, at least according to the original paper. Reshape such that they
# broadcast correctly.
beta = tf.reshape(beta, extra_shape)
gamma = tf.reshape(gamma, extra_shape)
x = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
return tf.reshape(x, orig_shape)
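# Worked example (illustrative, not in the original file): for NHWC input with
# C = 64 and num_groups = 32, each group holds 2 channels; the moments are
# computed over H, W and those 2 channels, while gamma/beta stay one value per
# channel (reshaped to (32, 2) so they broadcast over the grouped tensor).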
class GroupNormalization(tf.keras.layers.Layer):
"""A group-norm "layer" (see abs/1803.08494 go/dune-gn).
This function creates beta/gamma variables in a name_scope, and uses them to
apply `group_normalize` on the input `x`.
You can either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
If you specify neither, the paper's recommended `num_groups=32` is used.
Authors: Lucas Beyer, Joan Puigcerver.
"""
def __init__(self,
num_groups=None,
group_size=None,
eps=1e-5,
beta_init=tf.zeros_initializer(),
gamma_init=tf.ones_initializer(),
**kwargs):
"""Initializer.
Args:
num_groups: int, the number of channel-groups to normalize over.
group_size: int, size of the groups to normalize over.
eps: float, a small additive constant to avoid /sqrt(0).
beta_init: initializer for bias, defaults to zeros.
gamma_init: initializer for scale, defaults to ones.
**kwargs: other tf.keras.layers.Layer arguments.
"""
super(GroupNormalization, self).__init__(**kwargs)
if num_groups is None and group_size is None:
num_groups = 32
self._num_groups = num_groups
self._group_size = group_size
self._eps = eps
self._beta_init = beta_init
self._gamma_init = gamma_init
def build(self, input_size):
channels = input_size[-1]
assert channels is not None, "Cannot apply GN on dynamic channels."
self._gamma = self.add_weight(
name="gamma", shape=(channels,), initializer=self._gamma_init,
dtype=self.dtype)
self._beta = self.add_weight(
name="beta", shape=(channels,), initializer=self._beta_init,
dtype=self.dtype)
super(GroupNormalization, self).build(input_size)
def call(self, x):
return group_normalize(x, self._gamma, self._beta, self._num_groups,
self._group_size, self._eps)
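if __name__ == "__main__":
  # Hedged usage sketch (not part of the original file): apply the layer with
  # the paper-default 32 groups to a random NHWC tensor.
  gn = GroupNormalization()
  print(gn(tf.random.normal([2, 8, 8, 64])).shape)  # (2, 8, 8, 64)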
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reference implementation of AugMix's data augmentation method in numpy."""
import augmentations
import numpy as np
from PIL import Image
# CIFAR-10 constants
MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.2010]
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
image = image.transpose(2, 0, 1) # Switch to channel-first
mean, std = np.array(MEAN), np.array(STD)
image = (image - mean[:, None, None]) / std[:, None, None]
return image.transpose(1, 2, 0)
def apply_op(image, op, severity):
image = np.clip(image * 255., 0, 255).astype(np.uint8)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img) / 255.
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
    image: Raw input image as float32 np.ndarray of shape (h, w, c).
    severity: Severity of underlying augmentation operators (between 1 and
      10).
    width: Width of augmentation chain.
    depth: Depth of augmentation chain. -1 enables stochastic depth sampled
      uniformly from [1, 3].
    alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.float32(
np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image)
for i in range(width):
image_aug = image.copy()
d = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(d):
op = np.random.choice(augmentations.augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * normalize(image) + m * mix
return mixed
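if __name__ == '__main__':
  # Minimal usage sketch (illustrative, not part of the original file):
  # AugMix a random CIFAR-sized image with values in [0, 1].
  rng_image = np.random.uniform(size=(32, 32, 3)).astype(np.float32)
  mixed_image = augment_and_mix(rng_image, severity=3, width=3)
  print('mixed image shape:', mixed_image.shape)  # (32, 32, 3)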
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on ImageNet.
Currently only supports ResNet-50 training.
Example usage:
`python imagenet.py <path/to/ImageNet> <path/to/ImageNet-C>`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import models
from torchvision import transforms
augmentations.IMAGE_SIZE = 224
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__') and
callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Trains an ImageNet Classifier')
parser.add_argument(
'clean_data', metavar='DIR', help='path to clean ImageNet dataset')
parser.add_argument(
'corrupted_data', metavar='DIR_C', help='path to ImageNet-C dataset')
parser.add_argument(
'--model',
'-m',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=90, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=256, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0001,
help='Weight decay (L2 penalty).')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=1,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--aug-prob-coeff',
default=1.,
type=float,
help='Probability distribution coefficients')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=10,
help='Training loss print frequency (batches).')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
# Raw AlexNet errors taken from https://github.com/hendrycks/robustness
ALEXNET_ERR = [
0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,
0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,
0.606500
]
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs."""
b = args.batch_size / 256.
k = args.epochs // 3
if epoch < k:
m = 1
elif epoch < 2 * k:
m = 0.1
else:
m = 0.01
lr = args.learning_rate * m * b
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
      # reshape() rather than view(): the sliced tensor is non-contiguous.
      correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def compute_mce(corruption_accs):
"""Compute mCE (mean Corruption Error) normalized by AlexNet performance."""
mce = 0.
for i in range(len(CORRUPTIONS)):
avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])
ce = 100 * avg_err / ALEXNET_ERR[i]
mce += ce / 15
return mce
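# Worked example (illustrative): if a model's mean accuracy on
# 'gaussian_noise' across the five severities is 0.557, its average error is
# 0.443 and its CE is 100 * 0.443 / 0.886428 ~= 49.98; each corruption
# contributes CE / 15 to the reported mCE.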
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
    image: PIL.Image input image.
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(
np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width))
m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
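      # Return (clean, aug1, aug2) views of the same image so train() can
      # split the batched logits and compute the JSD consistency loss.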
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
def train(net, train_loader, optimizer):
"""Train for one epoch."""
net.train()
data_ema = 0.
batch_ema = 0.
loss_ema = 0.
acc1_ema = 0.
acc5_ema = 0.
end = time.time()
for i, (images, targets) in enumerate(train_loader):
# Compute data loading time
data_time = time.time() - end
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
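      # Jensen-Shannon consistency: the mean KL divergence of each of the
      # three distributions to their mixture, weighted by the constant 12.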
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
acc1, acc5 = accuracy(logits_clean, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
loss.backward()
optimizer.step()
# Compute batch computation time and update moving averages.
batch_time = time.time() - end
end = time.time()
data_ema = data_ema * 0.1 + float(data_time) * 0.9
batch_ema = batch_ema * 0.1 + float(batch_time) * 0.9
loss_ema = loss_ema * 0.1 + float(loss) * 0.9
acc1_ema = acc1_ema * 0.1 + float(acc1) * 0.9
acc5_ema = acc5_ema * 0.1 + float(acc5) * 0.9
if i % args.print_freq == 0:
print(
'Batch {}/{}: Data Time {:.3f} | Batch Time {:.3f} | Train Loss {:.3f} | Train Acc1 '
'{:.3f} | Train Acc5 {:.3f}'.format(i, len(train_loader), data_ema,
batch_ema, loss_ema, acc1_ema,
acc5_ema))
return loss_ema, acc1_ema, batch_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_transform):
"""Evaluate network on given corrupted dataset."""
corruption_accs = {}
for c in CORRUPTIONS:
print(c)
for s in range(1, 6):
valdir = os.path.join(args.corrupted_data, c, str(s))
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
loss, acc1 = test(net, val_loader)
if c in corruption_accs:
corruption_accs[c].append(acc1)
else:
corruption_accs[c] = [acc1]
print('\ts={}: Test Loss {:.3f} | Test Acc1 {:.3f}'.format(
s, loss, 100. * acc1))
return corruption_accs
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose(
[transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip()])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
preprocess,
])
traindir = os.path.join(args.clean_data, 'train')
valdir = os.path.join(args.clean_data, 'val')
train_dataset = datasets.ImageFolder(traindir, train_transform)
train_dataset = AugMixDataset(train_dataset, preprocess)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers)
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.model))
net = models.__dict__[args.model](pretrained=True)
else:
print("=> creating model '{}'".format(args.model))
net = models.__dict__[args.model]()
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc1 = checkpoint['best_acc1']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
test_loss, test_acc1 = test(net, val_loader)
print('Clean\n\tTest Loss {:.3f} | Test Acc1 {:.3f}'.format(
test_loss, 100 * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
      print('\t'.join(map(str, [c] + corruption_accs[c])))
print('mCE (normalized by AlexNet): ', compute_mce(corruption_accs))
return
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
'imagenet_{}_training_log.csv'.format(args.model))
with open(log_path, 'w') as f:
f.write(
'epoch,batch_time,train_loss,train_acc1(%),test_loss,test_acc1(%)\n')
best_acc1 = 0
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
train_loss_ema, train_acc1_ema, batch_ema = train(net, train_loader,
optimizer)
test_loss, test_acc1 = test(net, val_loader)
is_best = test_acc1 > best_acc1
best_acc1 = max(test_acc1, best_acc1)
checkpoint = {
'epoch': epoch,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%0.3f,%0.6f,%0.2f,%0.5f,%0.2f\n' % (
(epoch + 1),
batch_ema,
train_loss_ema,
100. * train_acc1_ema,
test_loss,
100. * test_acc1,
))
print(
'Epoch {:3d} | Train Loss {:.4f} | Test Loss {:.3f} | Test Acc1 '
'{:.2f}'
.format((epoch + 1), train_loss_ema, test_loss, 100. * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
print('\t'.join(map(str, [c] + corruption_accs[c])))
print('mCE (normalized by AlexNet):', compute_mce(corruption_accs))
if __name__ == '__main__':
main()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on CIFAR-10/100.
Supports WideResNet, AllConv, ResNeXt models on CIFAR-10 and CIFAR-100 as well
as evaluation on CIFAR-10-C and CIFAR-100-C.
Example usage:
`python cifar.py`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
from models.cifar.allconv import AllConvNet
import numpy as np
from third_party.ResNeXt_DenseNet.models.densenet import densenet
from third_party.ResNeXt_DenseNet.models.resnext import resnext29
from third_party.WideResNet_pytorch.wideresnet import WideResNet
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
parser = argparse.ArgumentParser(
description='Trains a CIFAR Classifier',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
type=str,
default='cifar10',
choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument(
'--model',
'-m',
type=str,
default='wrn',
choices=['wrn', 'allconv', 'densenet', 'resnext'],
help='Choose architecture.')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=100, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0005,
help='Weight decay (L2 penalty).')
# WRN Architecture options
parser.add_argument(
'--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='Widen factor')
parser.add_argument(
'--droprate', default=0.0, type=float, help='Dropout probability')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=3,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=50,
help='Training loss print frequency (batches).')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
def get_lr(step, total_steps, lr_max, lr_min):
"""Compute learning rate according to cosine annealing schedule."""
return lr_min + (lr_max - lr_min) * 0.5 * (1 +
np.cos(step / total_steps * np.pi))
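# Endpoint check (illustrative): get_lr(0, total, 1.0, 0.0) == 1.0 and
# get_lr(total, total, 1.0, 0.0) == 0.0, so the multiplier anneals smoothly
# from lr_max at the first step down to lr_min at the last.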
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
    image: PIL.Image input image.
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(np.random.dirichlet([1] * args.mixture_width))
m = np.float32(np.random.beta(1, 1))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
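      # Return (clean, aug1, aug2) views of the same image so train() can
      # split the batched logits and compute the JSD consistency loss.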
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
def train(net, train_loader, optimizer, scheduler):
"""Train for one epoch."""
net.train()
loss_ema = 0.
for i, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
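      # Jensen-Shannon consistency: the mean KL divergence of each of the
      # three distributions to their mixture, weighted by the constant 12.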
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
loss.backward()
optimizer.step()
scheduler.step()
loss_ema = loss_ema * 0.9 + float(loss) * 0.1
if i % args.print_freq == 0:
print('Train Loss {:.3f}'.format(loss_ema))
return loss_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_data, base_path):
"""Evaluate network on given corrupted dataset."""
corruption_accs = []
for corruption in CORRUPTIONS:
# Reference to original data is mutated
test_data.data = np.load(base_path + corruption + '.npy')
test_data.targets = torch.LongTensor(np.load(base_path + 'labels.npy'))
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
test_loss, test_acc = test(net, test_loader)
corruption_accs.append(test_acc)
print('{}\n\tTest Loss {:.3f} | Test Error {:.3f}'.format(
corruption, test_loss, 100 - 100. * test_acc))
return np.mean(corruption_accs)
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4)])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)])
test_transform = preprocess
if args.dataset == 'cifar10':
train_data = datasets.CIFAR10(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR10(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-10-C/'
num_classes = 10
else:
train_data = datasets.CIFAR100(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR100(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-100-C/'
num_classes = 100
train_data = AugMixDataset(train_data, preprocess, args.no_jsd)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
# Create model
if args.model == 'densenet':
net = densenet(num_classes=num_classes)
elif args.model == 'wrn':
net = WideResNet(args.layers, num_classes, args.widen_factor, args.droprate)
elif args.model == 'allconv':
net = AllConvNet(num_classes)
elif args.model == 'resnext':
net = resnext29(num_classes=num_classes)
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay,
nesterov=True)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc = checkpoint['best_acc']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
# Evaluate clean accuracy first because test_c mutates underlying data
test_loss, test_acc = test(net, test_loader)
print('Clean\n\tTest Loss {:.3f} | Test Error {:.2f}'.format(
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
return
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda
step,
args.epochs * len(train_loader),
1, # lr_lambda computes multiplicative factor
1e-6 / args.learning_rate))
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
args.dataset + '_' + args.model + '_training_log.csv')
with open(log_path, 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
best_acc = 0
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
begin_time = time.time()
train_loss_ema = train(net, train_loader, optimizer, scheduler)
test_loss, test_acc = test(net, test_loader)
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
checkpoint = {
'epoch': epoch,
'dataset': args.dataset,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_time,
train_loss_ema,
test_loss,
100 - 100. * test_acc,
))
print(
'Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} |'
' Test Error {4:.2f}'
.format((epoch + 1), int(time.time() - begin_time), train_loss_ema,
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' %
(args.epochs + 1, 0, 0, 0, 100 - 100 * test_c_acc))
if __name__ == '__main__':
main()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base augmentations operators."""
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
# ImageNet code should change this value
IMAGE_SIZE = 32
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
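# Worked examples (illustrative): int_parameter(5, 30) == 15 and
# float_parameter(5, 0.3) == 0.15; sample_level(3) draws the level uniformly
# from [0.1, 3), so each call applies its op at a random strength.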
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AllConv implementation (https://arxiv.org/abs/1412.6806)."""
import math
import torch
import torch.nn as nn
class GELU(nn.Module):
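  """Sigmoid approximation of the GELU activation (arXiv:1606.08415)."""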
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
def make_layers(cfg):
"""Create a single layer."""
layers = []
in_channels = 3
for v in cfg:
if v == 'Md':
layers += [nn.MaxPool2d(kernel_size=2, stride=2), nn.Dropout(p=0.5)]
elif v == 'A':
layers += [nn.AvgPool2d(kernel_size=8)]
elif v == 'NIN':
conv2d = nn.Conv2d(in_channels, in_channels, kernel_size=1, padding=1)
layers += [conv2d, nn.BatchNorm2d(in_channels), GELU()]
elif v == 'nopad':
conv2d = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=0)
layers += [conv2d, nn.BatchNorm2d(in_channels), GELU()]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.BatchNorm2d(v), GELU()]
in_channels = v
return nn.Sequential(*layers)
class AllConvNet(nn.Module):
"""AllConvNet main class."""
def __init__(self, num_classes):
super(AllConvNet, self).__init__()
self.num_classes = num_classes
self.width1, w1 = 96, 96
self.width2, w2 = 192, 192
self.features = make_layers(
[w1, w1, w1, 'Md', w2, w2, w2, 'Md', 'nopad', 'NIN', 'NIN', 'A'])
self.classifier = nn.Linear(self.width2, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n)) # He initialization
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
|
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
"""Basic ResNet block."""
def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = drop_rate
self.is_in_equal_out = (in_planes == out_planes)
    if self.is_in_equal_out:
      self.conv_shortcut = None
    else:
      self.conv_shortcut = nn.Conv2d(
          in_planes,
          out_planes,
          kernel_size=1,
          stride=stride,
          padding=0,
          bias=False)
def forward(self, x):
if not self.is_in_equal_out:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.is_in_equal_out:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.drop_rate > 0:
out = F.dropout(out, p=self.drop_rate, training=self.training)
out = self.conv2(out)
if not self.is_in_equal_out:
return torch.add(self.conv_shortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
"""Layer container for blocks."""
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
drop_rate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, drop_rate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
drop_rate):
layers = []
for i in range(nb_layers):
layers.append(
          block(in_planes if i == 0 else out_planes, out_planes,
                stride if i == 0 else 1, drop_rate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
"""WideResNet class."""
def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
super(WideResNet, self).__init__()
n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(
3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
drop_rate)
# 2nd block
self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
drop_rate)
# 3rd block
self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
drop_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(n_channels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(n_channels[3], num_classes)
self.n_channels = n_channels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.n_channels)
return self.fc(out)
|
"""DenseNet implementation (https://arxiv.org/abs/1608.06993)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
"""Bottleneck block for DenseNet."""
def __init__(self, n_channels, growth_rate):
super(Bottleneck, self).__init__()
inter_channels = 4 * growth_rate
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, inter_channels, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(inter_channels)
self.conv2 = nn.Conv2d(
inter_channels, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat((x, out), 1)
return out
class SingleLayer(nn.Module):
"""Layer container for blocks."""
def __init__(self, n_channels, growth_rate):
super(SingleLayer, self).__init__()
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
"""Transition block."""
def __init__(self, n_channels, n_out_channels):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, n_out_channels, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
"""DenseNet main class."""
def __init__(self, growth_rate, depth, reduction, n_classes, bottleneck):
super(DenseNet, self).__init__()
if bottleneck:
n_dense_blocks = int((depth - 4) / 6)
else:
n_dense_blocks = int((depth - 4) / 3)
n_channels = 2 * growth_rate
self.conv1 = nn.Conv2d(3, n_channels, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
n_out_channels = int(math.floor(n_channels * reduction))
self.trans1 = Transition(n_channels, n_out_channels)
n_channels = n_out_channels
self.dense2 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
n_out_channels = int(math.floor(n_channels * reduction))
self.trans2 = Transition(n_channels, n_out_channels)
n_channels = n_out_channels
self.dense3 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
self.bn1 = nn.BatchNorm2d(n_channels)
self.fc = nn.Linear(n_channels, n_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_dense(self, n_channels, growth_rate, n_dense_blocks, bottleneck):
layers = []
for _ in range(int(n_dense_blocks)):
if bottleneck:
layers.append(Bottleneck(n_channels, growth_rate))
else:
layers.append(SingleLayer(n_channels, growth_rate))
n_channels += growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.dense3(out)
out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
out = self.fc(out)
return out
def densenet(growth_rate=12, depth=40, num_classes=10):
model = DenseNet(growth_rate, depth, 1., num_classes, False)
return model
|
"""ResNeXt implementation (https://arxiv.org/abs/1611.05431)."""
import math
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
class ResNeXtBottleneck(nn.Module):
"""ResNeXt Bottleneck Block type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)."""
expansion = 4
def __init__(self,
inplanes,
planes,
cardinality,
base_width,
stride=1,
downsample=None):
super(ResNeXtBottleneck, self).__init__()
dim = int(math.floor(planes * (base_width / 64.0)))
self.conv_reduce = nn.Conv2d(
inplanes,
dim * cardinality,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn_reduce = nn.BatchNorm2d(dim * cardinality)
self.conv_conv = nn.Conv2d(
dim * cardinality,
dim * cardinality,
kernel_size=3,
stride=stride,
padding=1,
groups=cardinality,
bias=False)
self.bn = nn.BatchNorm2d(dim * cardinality)
self.conv_expand = nn.Conv2d(
dim * cardinality,
planes * 4,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn_expand = nn.BatchNorm2d(planes * 4)
self.downsample = downsample
def forward(self, x):
residual = x
bottleneck = self.conv_reduce(x)
bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)
bottleneck = self.conv_conv(bottleneck)
bottleneck = F.relu(self.bn(bottleneck), inplace=True)
bottleneck = self.conv_expand(bottleneck)
bottleneck = self.bn_expand(bottleneck)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""ResNext optimized for the Cifar dataset, as specified in https://arxiv.org/pdf/1611.05431.pdf."""
def __init__(self, block, depth, cardinality, base_width, num_classes):
super(CifarResNeXt, self).__init__()
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 9 == 0, 'depth should be one of 29, 38, 47, 56, 101'
layer_blocks = (depth - 2) // 9
self.cardinality = cardinality
self.base_width = base_width
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.inplanes = 64
self.stage_1 = self._make_layer(block, 64, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 128, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 256, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
        init.kaiming_normal_(m.weight)  # in-place variant; `kaiming_normal` is deprecated
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, self.cardinality, self.base_width, stride,
downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes, planes, self.cardinality, self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnext29(num_classes=10, cardinality=4, base_width=32):
model = CifarResNeXt(ResNeXtBottleneck, 29, cardinality, base_width,
num_classes)
return model
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cascades API."""
# A new PyPI release will be pushed every time `__version__` is increased
# When changing this, also update the CHANGELOG.md
__version__ = '0.3.2'
from cascades._src.distributions.base import UniformCategorical
from cascades._src.distributions.gpt import GPT
from cascades._src.distributions.choose import Choose
from cascades._src.distributions.strings import get_default_lm
from cascades._src.distributions.strings import mock_lm
from cascades._src.distributions.strings import set_default_lm
from cascades._src.distributions.strings import String
from cascades._src.handlers import factor
from cascades._src.handlers import log
from cascades._src.handlers import observe
from cascades._src.handlers import param
from cascades._src.handlers import reject
from cascades._src.handlers import rejection_sample
from cascades._src.handlers import sample
from cascades._src.inference import model
from cascades._src.interpreter import Interpreter
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for handlers."""
import math
from absl.testing import absltest
from cascades._src import handlers
import jax.numpy as jnp
from numpyro import distributions as np_dists
def _binomial(k, p=0.5):
total = 0
for _ in range(k):
flip = yield handlers.sample(np_dists.Bernoulli(probs=p))
total += flip
return total
def _flip_paths(n, a, b):
nheads = yield _binomial(n)
if int(nheads) != a:
yield handlers.reject(reason=f'nheads {nheads} != {a}')
nheads = yield _binomial(n)
if int(nheads) != b:
yield handlers.reject(reason=f'nheads {nheads} != {b}')
def _gaussian_mixture(locs):
"""Standard gaussian mixture model."""
n = len(locs)
mixing_dist = np_dists.Categorical(probs=jnp.ones(n) / n)
component_dist = np_dists.Normal(loc=jnp.array(locs), scale=jnp.ones(n))
mixture = np_dists.MixtureSameFamily(mixing_dist, component_dist)
return mixture
def gaussian_mixture_likelihood(proposal_loc=0.0,
proposal_scale=3.0,
mixture_locs=(-5.0, 5.0)):
"""Demonstrate proposing & scoring in same program."""
# Proposal distribution
proposal = yield handlers.sample(
name='proposal',
dist=np_dists.Normal(loc=proposal_loc, scale=proposal_scale))
mixture = _gaussian_mixture(mixture_locs)
# Add term to likelihood
yield handlers.sample(name='score', dist=mixture, obs=proposal)
return proposal
class SimpleTests(absltest.TestCase):
def test_likelihood_weighting(self):
"""Sample from normal, and weight using mixture."""
locs = [-5.0, 5.0]
def _fn(verbose=False):
del verbose
return gaussian_mixture_likelihood(mixture_locs=locs)
mixture = _gaussian_mixture(locs)
forward_sample_handler, result = handlers.forward_sample(fn=_fn, seed=0)
expected_score = mixture.log_prob(result['return_value'])
self.assertAlmostEqual(expected_score, result['observed_likelihood'])
self.assertEqual(forward_sample_handler.result, result['return_value'])
  def test_paths_rejection_sampling(self):
fn = lambda: _flip_paths(3, 1, 2)
fn = handlers.AutoName(fn) # Uniquely name each sample.
_, result = handlers.rejection_sample(fn=fn, seed=0, max_attempts=100)
effects = result['intermediates']
nheads = sum(eff.value for eff in effects)
self.assertEqual(3, int(nheads))
class RejectTest(absltest.TestCase):
def test_reject(self):
def _reject_test():
yield handlers.log('log1', 'Log 1')
yield handlers.reject(reason='rejected for no reason')
yield handlers.log('log2', 'Log 2')
_, result = handlers.forward_sample(fn=_reject_test, seed=0)
self.assertTrue(math.isinf(result['observed_likelihood']))
self.assertLess(result['observed_likelihood'], 0)
self.assertIsInstance(result['intermediates'][-1], handlers.Reject)
self.assertLen(result['intermediates'], 2)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Effect handlers which specify how to interpret yielded effects."""
import collections
import dataclasses
import math
import types
from typing import Any, Callable, Dict, List, Optional, Text, Type, Union
from cascades._src.distributions import base as dists
import immutabledict
import jax
Distribution = dists.Distribution
RandomSample = dists.RandomSample
class BaseModel:
"""Base class for Cascade models."""
def __init__(self,
model,
*args,
observe: Optional[Dict[str, Any]] = None,
name: Optional[str] = None,
**kwargs): # pylint: disable=redefined-outer-name
"""Wrap callable generator as a sampleable model.
Args:
model: Callable to wrap.
*args: Arguments passed as inputs to model.
observe: Values to condition when sampling.
name: Optional name used for scoping when nesting models.
**kwargs: Keyword arguments passed as inputs to model.
"""
self._model = model
self._name = name
self._args = args
self._kwargs = kwargs
self._observe = observe
def to_generator(self):
"""Create a generator from model.
    Wraps effects in a NameScope, which automatically adds the model name to
    effect addresses.
TODO(ddohan): Add an Observer if `self._observe` is set as well.
Returns:
A generator which traces through the model.
"""
# Wrap into a name scope to get unique names
# Call the model to get a generator
gen = self._model(
*self._args,
**self._kwargs)
if self._name:
gen = NameScope(gen_fn=gen, scope=self._name)()
return gen
def __repr__(self):
model_name = self._model.__name__
return f'<{self.__class__.__name__}[{model_name}]>'
## Methods to run a generator.
def run_with_intermediates(gen):
"""Run generator to completion and return final value and intermediates.
Args:
gen: Generator.
Returns:
Dict with value = return_value and intermediates = yielded values.
"""
assert isinstance(gen, types.GeneratorType)
intermediates = []
while True:
try:
x = next(gen)
intermediates.append(x)
except StopIteration as e:
return_value = None
if e.args is not None and len(e.args):
return_value = e.args[0]
break
return dict(return_value=return_value, intermediates=intermediates)
def forward_sample(fn, seed, *args, **kwargs):
"""Sample once from fn(*args, **kwargs) with seed.
Args:
fn: Callable which creates a generator, yielding distributions or effects.
seed: Random seed to use.
*args: Args passed to fn.
**kwargs: Kwargs passed to fn.
Returns:
    Tuple of (handlers, result), where handlers is the effect handler stack
    used to interpret the model, and result is a dict containing
    `return_value`, `intermediates`, and `observed_likelihood`.
"""
# TODO(ddohan): add ApplyContext back in.
# gen_fn = ApplyContext(lambda: fn(*args, **kwargs))
gen_fn = lambda: fn(*args, **kwargs)
# TODO(ddohan): Add in ParamHandler
handler_fn = compose_handlers([Record, StopOnReject, Sampler, Observer])
forward_sample_handler: Record = handler_fn(Seed(seed=seed, gen_fn=gen_fn)) # pytype: disable=annotation-type-mismatch
result_with_metadata = forward_sample_handler.run_with_intermediates(
verbose=False)
result_with_metadata['observed_likelihood'] = (
forward_sample_handler.observed_likelihood)
return forward_sample_handler, result_with_metadata
def rejection_sample(fn, seed, max_attempts, *args, **kwargs):
"""Sample repeatedly until a success."""
rng = dists.get_rng(seed)
for i in range(max_attempts):
rng, subrng = jax.random.split(rng)
handlers, result = forward_sample(fn, subrng, *args, **kwargs)
observed_likelihood = handlers.observed_likelihood
if not math.isinf(observed_likelihood):
result['attempts'] = i + 1
return handlers, result
return None
## Helper methods to create effects within a model.
def log(value, name=None, factor=0.0): # pylint: disable=redefined-outer-name
"""Record a value into a trace with given log_prob factor."""
yield Log(
value=value,
fn=dists.Constant(value=value, factor=factor),
name=name or 'log')
def sample(dist=None, obs=None, name=None):
"""Sample value from distribution. Optionally takes observed value."""
effect = yield from sample_and_score(dist=dist, obs=obs, name=name)
return effect.value
def observe(dist=None, obs=None, name=None):
"""Observe that given distribution takes on observed value. Returns score."""
effect = yield from sample_and_score(dist=dist, obs=obs, name=name)
return effect.score
def sample_and_score(dist=None, obs=None, name=None):
"""Get RandomSample from distribution. Optionally takes an observed value."""
if obs is None:
effect = Sample(fn=dist, value=None, name=name)
else:
effect = Observe(fn=dist, value=obs, name=name)
returned_effect = yield effect
return returned_effect
def factor(score, name=None):
"""Add score to likelihood of current trace.
Args:
score: Numeric value to add to trace likelihood. Used to intervene in trace
weights.
name: Name for the effect.
Yields:
An Observe effect for the given Factor distribution.
"""
dist = dists.Factor(factor=score)
yield from sample(dist=dist, obs=dists.FactorSentinel(), name=name)
def reject(reason, name=None):
"""Add -inf term to likelihood of current trace."""
dist = dists.Factor(reason=reason, factor=-math.inf)
yield Reject(
fn=dist, value=dists.FactorSentinel, score=-math.inf, name=name or reason)
def param(name=None, dist=None, value=None):
"""Create a parameter. Samples from `dist` by default if value not given."""
effect = yield Param(fn=dist, value=value, name=name)
return effect.value
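# Illustrative sketch (not part of the original module): a model is a
# generator composed from the helpers above, e.g. (using the numpyro-backed
# distributions re-exported as `dists.np_dists`):
#
#   def two_flips(p=0.5):
#     a = yield sample(dists.np_dists.Bernoulli(probs=p), name='a')
#     b = yield sample(dists.np_dists.Bernoulli(probs=p), name='b')
#     if not (a or b):
#       yield reject(reason='both tails')
#     return a + b
#
# Run it with `forward_sample(fn=two_flips, seed=0)`, which returns the
# handler stack plus a dict of the return value and intermediate effects.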
def _yielded_value_to_effect(value):
"""Convert a value to an effect."""
if isinstance(value, Effect):
return value
elif isinstance(value, dists.np_dists.Distribution):
name = str(value.__class__)
if hasattr(value, 'name'):
name = value.name
# pylint does not handle dataclass inheritance properly.
return Sample(fn=value, name=name) # pylint: disable=unexpected-keyword-arg
elif isinstance(value, Distribution):
return Sample(fn=value, name=value.name) # pylint: disable=unexpected-keyword-arg
else:
raise ValueError('Unknown effect type %s' % str(value))
## Basic Effect types.
@dataclasses.dataclass
class Effect:
"""Track state of an effect which is processed by EffectHandlers."""
# Unique name for site which yielded this effect.
name: Optional[Text] = None
# Results of sampling or scoring.
score: Optional[float] = dataclasses.field(repr=True, default=None)
value: Optional[Any] = dataclasses.field(repr=True, default=None)
# Callable, generally used for sampling and scoring.
fn: Optional[Union[Callable, Distribution]] = dataclasses.field( # pylint: disable=g-bare-generic
repr=False, default=None)
args: Optional[List[Any]] = dataclasses.field(
repr=False, default_factory=list)
kwargs: Optional[Dict[Text, Any]] = dataclasses.field(
repr=False, default_factory=dict)
  # If True, then the generator should be halted;
  # generally used when the likelihood becomes infinite.
should_stop: bool = False
replayed: bool = False
metadata: Optional[Any] = None
def __repr__(self):
kws = []
for k, v in self.__dict__.items():
if not self.__dataclass_fields__[k].repr:
continue
v = str(v)[:64]
s = f'{k}={v!r}'
kws.append(s)
return '{}({})'.format(type(self).__name__, ',\n'.join(kws))
@dataclasses.dataclass
class Sample(Effect):
"""A distribution together with its sampled value."""
pass
@dataclasses.dataclass
class Observe(Effect):
"""A distribution together with its observed value."""
pass
@dataclasses.dataclass
class Log(Observe):
"""Log information to the trace. May be used for conditioning model."""
value: Any = None
# Unique name for site which yielded this effect.
name: Optional[Text] = None
@dataclasses.dataclass
class Reject(Observe):
"""Reject a trace. Equivalent to -inf likelihood."""
pass
@dataclasses.dataclass
class Param(Observe):
"""Model parameters."""
pass
## Core effect handlers.
class EffectHandler:
"""Wraps generator fn and customizes how yielded effects are interpreted."""
def __init__(self, gen_fn=None):
"""Wraps given generator fn. May be None if self.__call__ is not used."""
self.gen_fn = gen_fn
self._context = None
def run_with_intermediates(self, fn_or_gen=None, verbose=None):
return run_with_intermediates(self(fn_or_gen=fn_or_gen, verbose=verbose))
def process(self, effect):
"""Applied before yield."""
return effect
def postprocess(self, effect):
"""Applied after yield."""
return effect
def on_return(self, return_value):
"""Runs on return from program."""
pass
def get_stack(self):
"""If wrapped gen_fn is an EffectHandler, get entire stack of handlers."""
handlers = []
handler = self
# TODO(ddohan): Unwrap functools.partial as well.
while isinstance(handler, EffectHandler):
handlers.append(handler)
handler = handler.gen_fn
return handlers
def __call__(self, fn_or_gen=None, verbose=False, nested=False):
"""Iterate generator fn, interpreting effects using (post)process.
Args:
fn_or_gen: If given, generator or callable returning a generator to trace.
If not given, defaults to self.gen_fn
verbose: If True, log out the trace.
      nested: Used internally for recursive nested calls. Changes the behavior
        of `on_return`, which only runs for the outermost (non-nested) call.
Yields:
Effects
# Indent to satisfy pylint.
Returns:
Value returned from wrapped generator.
"""
def _log(x):
if verbose:
print(x)
if fn_or_gen is None:
fn_or_gen = self.gen_fn
if fn_or_gen is None:
      raise ValueError(
          '`gen_fn` must be passed as an argument to EffectHandler.__init__ '
          'or __call__. Was None.')
if isinstance(fn_or_gen, types.GeneratorType):
# assert not args and not kwargs, (args, kwargs)
gen = fn_or_gen
else:
# Call function to get a generator back
gen = fn_or_gen()
value_to_inject = None
return_value = None
try:
while True:
_log(f'Injecting {value_to_inject}')
yielded_value = gen.send(value_to_inject)
_log(f'Yielded value: {yielded_value}')
if isinstance(yielded_value, BaseModel):
# If the yielded value is a cascades model, we need to
# trace into it.
yielded_value = yielded_value.to_generator()
if isinstance(yielded_value, types.GeneratorType):
# Recursively trace through yielded generator.
# TODO(ddohan): Support yielding functions as well.
value_to_inject = yield from self(
fn_or_gen=yielded_value, nested=nested + 1)
_log(f'received from yield: {value_to_inject}')
continue
else:
effect = _yielded_value_to_effect(yielded_value)
# Process & postprocess modify the effect in place
effect = self.process(effect)
if effect is None:
raise ValueError(f'Did not return effect from {self}')
returned_effect = yield effect
if returned_effect is not None:
effect = returned_effect
# TODO(ddohan): postprocess will be applied from outside in
# Do we want to somehow apply it inside out as well?
# This would require an additional yield.
effect = self.postprocess(effect)
if effect is None:
raise ValueError(f'Did not return effect from {self}')
if effect.should_stop:
return None
value_to_inject = effect
except StopIteration as e:
if e.args is not None and len(e.args):
return_value = e.args[0]
_log(e.args)
return return_value
return None
finally:
# Always ensure that any locally scoped state is unloaded.
# TODO(ddohan): Test that this behaves as expected for WithContext
# handlers when `nested=True`.
if not nested:
self.on_return(return_value)
def compose_handlers(handlers: List[Type[EffectHandler]]):
"""Compose together a list of handlers."""
if not handlers:
raise ValueError('Cannot compose an empty set of handlers.')
def init_handlers(gen_fn=None):
for handler_cls in handlers[::-1]:
if handler_cls is None:
continue
gen_fn = handler_cls(gen_fn=gen_fn)
return gen_fn
return init_handlers
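# Minimal usage sketch (an illustration, not part of the library API): compose
# a Record -> StopOnReject -> Sampler -> Seed stack around `_example_model`
# above. Handlers earlier in the list end up outermost, so `Record` sees each
# effect after `Sampler` has filled in its value and score.
def _example_compose_usage(seed=0):
  import functools  # Local import to keep the sketch self-contained.
  init_fn = compose_handlers(
      [Record, StopOnReject, Sampler, functools.partial(Seed, seed=seed)])
  record = init_fn(gen_fn=_example_model)
  for _ in record():  # Drain the program; effects are recorded to the tape.
    pass
  return record.trace  # OrderedDict mapping site name -> Effect.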
class Record(EffectHandler):
"""Record effects into trace."""
def __init__(self, gen_fn=None):
super().__init__(gen_fn=gen_fn)
self.trace = collections.OrderedDict()
self.return_value = None
self.keys = []
self.observed_likelihood = 0.0
self.unobserved_likelihood = 0.0
self.done = False
self.inputs = None
self.metadata = None
def __getitem__(self, key):
"""Get variable from trace by name."""
return self.trace[key]
@property
def joint_likelihood(self):
return self.observed_likelihood + self.unobserved_likelihood
def __repr__(self):
kvs = [f' {k}: {v}' for k, v in self.trace.items()]
kvs = '\n'.join(kvs)
return f'Record(\n{kvs}\n)'
  def on_return(self, return_value):
    """Runs on return from program."""
    # Keep both attributes in sync: `__init__` declares `return_value`, and
    # callers such as `reify` read it.
    self.result = return_value
    self.return_value = return_value
    self.done = True
def process(self, effect):
if effect.name is None:
raise ValueError(
f'Cannot record trace for effect without a name: {effect}')
if effect.name in self.trace:
raise ValueError(f'Address `{effect.name}` is already in trace')
self.trace[effect.name] = effect
self.keys.append(effect.name)
if isinstance(effect, Observe):
if effect.score is not None:
self.observed_likelihood += effect.score
    elif isinstance(effect, Sample):
      if effect.score is not None:
        self.unobserved_likelihood += effect.score
return effect
class Seed(EffectHandler):
"""Set `rng` kwarg for each effect."""
def __init__(self, gen_fn=None, seed=None):
super().__init__(gen_fn=gen_fn)
if isinstance(seed, int):
self.rng = jax.random.PRNGKey(seed=seed)
else:
self.rng = seed
def process(self, effect):
self.rng, subrng = jax.random.split(self.rng)
effect.kwargs['rng'] = subrng
return effect
class Sampler(EffectHandler):
"""Sample values from Distributions."""
def __init__(self, gen_fn=None, await_timeout=None):
super().__init__(gen_fn=gen_fn)
self._await_timeout = await_timeout
def process(self, effect):
if isinstance(effect, Sample):
if effect.value is None:
random_sample = dists.sample_distribution(
effect.fn,
*effect.args,
await_timeout=self._await_timeout,
**effect.kwargs)
effect.value = random_sample.value
effect.score = random_sample.log_p
return effect
class Observer(EffectHandler):
"""Compute score of Observe statements."""
def __init__(self,
gen_fn=None,
await_timeout=None,
rescore=True,
observed=None):
super().__init__(gen_fn=gen_fn)
self._await_timeout = await_timeout
self._rescore = rescore
self._observed = observed
def process(self, effect):
if self._observed and effect.name in self._observed:
# Inject observed value into the effect, for scoring below.
expected = self._observed[effect.name]
if isinstance(effect, Observe):
effect.value = expected
effect.score = None
elif isinstance(effect, Sample):
effect = Observe(**effect.__dict__)
effect.value = expected
effect.score = None
if isinstance(effect, Observe):
if effect.value is None:
raise ValueError(f'Observe with a None value: {effect}')
if isinstance(self._rescore, bool):
should_rescore = self._rescore
else:
should_rescore = effect.name in self._rescore
if should_rescore:
score = dists.score_distribution(
effect.fn,
effect.value,
await_timeout=self._await_timeout,
)
else:
score = None
effect.score = score
return effect
class StopOnReject(EffectHandler):
"""Mark that tracing should stop in event of inf likelihood."""
def process(self, effect):
if effect.score is not None:
should_stop = jax.numpy.any(jax.numpy.isinf(effect.score))
if jax.device_get(should_stop):
effect.should_stop = True
return effect
class NameScope(EffectHandler):
"""Prepend scope to variable name."""
def __init__(self, gen_fn=None, scope=None):
super().__init__(gen_fn=gen_fn)
self.scope = scope
def __repr__(self):
return f'<NameScope {self.gen_fn}>'
def process(self, effect):
name = effect.name
if name is None:
name = effect.fn.__class__.__name__
name = f'{self.scope}/{name}'
effect.name = name
return effect
class AutoName(EffectHandler):
"""Assign a unique name to each effect based on index in trace."""
def __init__(self, gen_fn=None):
super().__init__(gen_fn=gen_fn)
self.idx = 0
def __repr__(self):
return f'<AutoName {self.gen_fn}>'
def process(self, effect):
name = effect.name
if name is None:
name = effect.fn.__class__.__name__
name = f'{self.idx}/{name}'
effect.name = name
self.idx += 1
return effect
class Replay(EffectHandler):
"""Look up site values from given trace."""
def __init__(self,
gen_fn,
trace,
assert_all_used=True,
replay_scores=False,
rescore=False,
await_timeout=None):
super().__init__(gen_fn=gen_fn)
if not isinstance(trace, immutabledict.ImmutableOrderedDict):
trace = immutabledict.ImmutableOrderedDict(trace)
self.trace = trace
self._unused = set(k for k in trace)
self._assert_all_used = assert_all_used
if replay_scores and rescore:
raise ValueError('Cannot both replay and rescore log probs.')
self._replay_scores = replay_scores
self._rescore = rescore
self._await_timeout = await_timeout
def on_return(self, return_value):
"""Runs on return from program."""
if self._assert_all_used and self._unused:
raise ValueError(f'Some keys were unused in replay: {self._unused}')
def process(self, effect):
if effect.name in self.trace:
cached = self.trace[effect.name]
effect.value = cached.value
if self._replay_scores:
effect.score = cached.score
elif self._rescore:
effect.score = dists.score_distribution(
fn=effect.fn, value=effect.value, await_timeout=self._await_timeout)
effect.replayed = True
self._unused.remove(effect.name)
return effect
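# Minimal usage sketch (an illustration, not part of the library API): record
# a trace, then replay it through a fresh run so both executions see identical
# values and scores at every named site.
def _example_replay(seed=0):
  first = Record(gen_fn=Sampler(gen_fn=Seed(gen_fn=_example_model, seed=seed)))
  for _ in first():
    pass
  replayed = Record(gen_fn=Replay(
      gen_fn=_example_model, trace=first.trace, replay_scores=True))
  for _ in replayed():
    pass
  return first.trace, replayed.trace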
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sampler."""
from absl.testing import absltest
from cascades._src import handlers as h
from cascades._src import sampler
import jax.numpy as jnp
from numpyro import distributions as np_dists
def _binomial(k, p=0.5):
total = 0
for _ in range(k):
flip = yield h.sample(np_dists.Bernoulli(probs=p))
total += flip
return total
def _flip_paths(n, a, b):
nheads = yield _binomial(n)
if int(nheads) != a:
yield h.reject(reason=f'nheads {nheads} != {a}')
nheads = yield _binomial(n)
if int(nheads) != b:
yield h.reject(reason=f'nheads {nheads} != {b}')
def _gaussian_mixture(locs):
"""Standard gaussian mixture model."""
n = len(locs)
mixing_dist = np_dists.Categorical(probs=jnp.ones(n) / n)
component_dist = np_dists.Normal(loc=jnp.array(locs), scale=jnp.ones(n))
mixture = np_dists.MixtureSameFamily(mixing_dist, component_dist)
return mixture
def gaussian_mixture_likelihood(proposal_loc=0.0,
proposal_scale=3.0,
mixture_locs=(-5.0, 5.0)):
"""Demonstrate proposing & scoring in same program."""
# Proposal distribution
proposal = yield h.sample(
name='proposal',
dist=np_dists.Normal(loc=proposal_loc, scale=proposal_scale))
mixture = _gaussian_mixture(mixture_locs)
# Add term to likelihood
yield h.sample(name='score', dist=mixture, obs=proposal)
return proposal
class SamplerLikelihood(absltest.TestCase):
def test_likelihood_weighting(self):
"""Sample from normal, and weight using mixture."""
locs = [-5.0, 5.0]
def _fn(verbose=False):
del verbose
return gaussian_mixture_likelihood(mixture_locs=locs)
s = sampler.Sampler(
model=lambda: h.AutoName(_fn)(), rescore=True, reparam_fn=None) # pylint: disable=unnecessary-lambda
tracer: h.Record = s.build_tracer(0)
self.assertFalse(tracer.done)
sampler.reify(tracer)
mixture = _gaussian_mixture(locs)
expected_score = mixture.log_prob(tracer.return_value)
self.assertEqual(expected_score, tracer.observed_likelihood)
self.assertTrue(tracer.done)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default interpreter for cascade models.
Contains an implementation of basic functionality in a single effect
handler. Intended as a safe default interpreter and for easy modification for
custom functionality.
"""
import collections
import dataclasses
import math
from typing import Any, Dict, Optional
from cascades._src import handlers
import jax
def setup_seed(key_or_seed):
  """Convert an integer seed to a JAX PRNGKey; pass through existing keys."""
  if isinstance(key_or_seed, int):
    return jax.random.PRNGKey(seed=key_or_seed)
  else:
    return key_or_seed
def tape_to_dict(tape, stats):
"""Converts a tape from the interpreter to a map from variables to values."""
result = dict()
for name, effect in tape.items():
if (isinstance(effect, handlers.Sample) or
isinstance(effect, handlers.Observe) or
isinstance(effect, handlers.Log)):
result[name] = effect.value
for key, value in dataclasses.asdict(stats).items():
converted_key = '_' + str(key)
result[converted_key] = value
return result
def mode(list_):
return collections.Counter(list_).most_common(1)[0][0]
class Samples():
"""Set of samples produced from an inference algorithm."""
def __init__(self, variable_names=None, data=None):
self._variable_names = variable_names or collections.OrderedDict()
self._data = data or list()
def append_row(self, variables_dict):
self._variable_names.update({k: None for k in variables_dict.keys()})
this_data = {
name: variables_dict.get(name, None) for name in self._variable_names}
self._data.append(this_data)
def update_row(self, variables_dict):
"""Updates the most recent row based on the values in variables_dict."""
self._variable_names.update({k: None for k in variables_dict.keys()})
this_data = self._data[-1]
this_data.update(variables_dict)
def get_row(self, ri):
return dict(self._data[ri])
def get_column(self, column_name):
if column_name not in self._variable_names:
raise KeyError(column_name)
return [d.get(column_name, None) for d in self._data]
@property
def data(self):
return self._data
@property
def columns(self):
return self._variable_names.keys()
def size(self):
return len(self._data)
def update(self, other):
# `other` is of the same class, so this is OK.
# pylint: disable=protected-access
for ri in range(len(other._data)):
self.append_row(other._data[ri])
# pylint: enable=protected-access
def project(self, subvars):
variable_names = collections.OrderedDict([(k, None) for k in subvars])
rows = []
for ri in range(len(self._data)):
datum = self.get_row(ri)
rows.append({k: datum.get(k, None) for k in variable_names.keys()})
return Samples(variable_names, rows)
def group_by(self, column_name):
subgroups = collections.defaultdict(list)
for ri in range(len(self._data)):
key = self._data[ri][column_name]
subgroups[key].append(self._data[ri])
return [Samples(self._variable_names, v) for v in subgroups.values()]
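# Minimal usage sketch (an illustration, not part of the library API): rows
# are padded with None for missing columns, and `project` selects a subset of
# columns while keeping one entry per row.
def _example_samples_usage():
  samples = Samples()
  samples.append_row(dict(x=1, y='a'))
  samples.append_row(dict(x=2))  # `y` is filled with None for this row.
  assert samples.get_column('y') == ['a', None]
  assert samples.project(['x']).get_row(0) == dict(x=1)
  return samples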
@dataclasses.dataclass
class Stats():
likelihood_sampled: float = 0.0
likelihood_observed: float = 0.0
class InferenceHook():
"""Callbacks for the interpreter to implement inference algorithms."""
def __init__(self, await_timeout=60):
self._await_timeout = await_timeout
def handle_sample(self, effect, rng, stats):
raise NotImplementedError()
def handle_observe(self, effect, rng, stats):
raise NotImplementedError()
def handle_log(self, effect, rng, stats):
del rng, stats
# Do nothing by default. It's OK not to handle a log.
return effect
class Interpreter(handlers.EffectHandler):
"""Default effect handler for interpreting a cascades program.
Example for a simple random walk from a parameterized location:
```
def random_walk():
# 2 step random walk starting at `start` and ending at 0.
start = yield cc.param(name='start')
step1 = yield cc.sample(Normal(loc=start), name='step1')
yield cc.sample(Normal(loc=start + step1), obs=0.0, name='step2')
tracer = cc.Interpreter(random_walk, param_store=dict(start=2.0))
tracer.run()
```
"""
def __init__(self,
gen_fn,
seed: int = 0,
param_store: Optional[Dict[str, Any]] = None,
await_timeout: int = 60,
verbose: bool = False,
autoname: bool = True,
inference_hook: Optional[InferenceHook] = None):
"""Instantiate a cascades interpreter.
Args:
gen_fn: Callable generator.
seed: Random number seed.
param_store: Map from parameter name to value.
await_timeout: Length of time to wait for futures.
verbose: If True, log effects.
autoname: If True, automatically increments a unique id each time a
variable name reappears.
inference_hook: Called when events are received. This is useful for
implementing inference algorithms.
"""
super().__init__(gen_fn=gen_fn)
self._rng = setup_seed(seed)
self._tape = dict()
self._param_store = param_store or dict()
self._await_timeout = await_timeout
self._verbose = verbose
self._likelihood_sampled = 0.0
self._likelihood_observed = 0.0
self._autoname = autoname
self._name_counts = collections.defaultdict(int)
self._exception = None
self._inference_hook = inference_hook
self._stats = Stats()
# Track if this interpreter has been started already.
self._started = False
def run(self):
if self._started:
raise ValueError(
'`run` may only be called once per interpreter instance.')
self._started = True
gen = self()
for eff in gen:
if eff.should_stop:
break
return self
def __getitem__(self, key):
"""Get item from trace tape by name."""
return self._tape[key]
@property
def stats(self):
return self._stats
def __repr__(self):
kvs = [f' {k}: {v}' for k, v in self._tape.items()]
kvs = ',\n'.join(kvs)
formatted = str(dict(stats=self.stats))
tape = f'{{\n{kvs}\n}}'
return formatted[:-1] + f",\n 'tape': {tape}}}"
def process(self, effect):
self._rng, subrng = jax.random.split(self._rng, 2)
def log(string):
if self._verbose:
print(string)
if not effect.name:
raise ValueError(f'Must name effect: {effect}')
if effect.name in self._tape:
if self._autoname:
self._name_counts[effect.name] += 1
idx = self._name_counts[effect.name]
effect.name = f'{effect.name}/{idx}'
else:
raise ValueError(f'Effect name is not unique: {effect.name}')
if isinstance(effect, handlers.Log):
# Logs will get added to the tape. Also ask the inference_hook
# if it wants to do anything.
if self._inference_hook:
effect = self._inference_hook.handle_log(effect, subrng, self._stats)
elif isinstance(effect, handlers.Reject):
effect.score = -jax.numpy.inf
self._stats.likelihood_observed = -jax.numpy.inf
elif isinstance(effect, handlers.Param):
# aka handlers.ParamStore
      log(f'Param encountered: {effect}')
if effect.name in self._param_store:
log(f'Found parameter in store: {effect}')
effect.value = self._param_store[effect.name]
else:
if effect.value is None:
# Initialize by sampling from the distribution
log(f'Sampling new param value: {effect.name}')
if not effect.fn:
raise ValueError(f'Param fn cannot be None: `{effect}`')
          random_sample = handlers.dists.sample_distribution(
              effect.fn, *effect.args, **effect.kwargs)
          effect.value = random_sample.value
          effect.score = random_sample.log_p
self._param_store[effect.name] = effect.value
elif isinstance(effect, handlers.Sample):
if self._inference_hook:
effect = self._inference_hook.handle_sample(effect, subrng, self._stats)
elif isinstance(effect, handlers.Observe):
if self._inference_hook:
effect = self._inference_hook.handle_observe(effect, subrng,
self._stats)
if effect.score is not None:
# Stop on Reject and infinite likelihoods
should_stop = jax.numpy.any(jax.numpy.isinf(effect.score))
if jax.device_get(should_stop):
effect.should_stop = True
# Record to a tape. aka handlers.Record
self._tape[effect.name] = effect
return effect
def on_return(self, return_value):
if 'return_value' in self._tape:
raise ValueError(
f'Cannot have `return_value` already recorded in tape: {self._tape.keys()}'
)
self._tape['return_value'] = return_value
def __call__(self, fn_or_gen=None, nested=False):
"""Improved exception handling around the base handler."""
try:
return_value = yield from super().__call__(
fn_or_gen=fn_or_gen, verbose=self._verbose, nested=nested)
return return_value
except Exception as e:
self._exception = e
print('Caught exception')
print(e)
self._tape['exception'] = e
self._likelihood_observed += -math.inf
# TODO(ddohan): Maintain more traceback context!
raise e
@property
def tape(self):
return self._tape
def try_sample(model, capture=False, **kwargs) -> Interpreter:
"""Sample from model, with error capture.
Args:
model: Cascade model fn to sample.
capture: If True, then capture and log exceptions.
**kwargs: Arguments passed to Interpreter
Returns:
Interpreter instance after running through program.
"""
tracer = Interpreter(model, **kwargs)
try:
tracer.run()
except Exception as e: # pylint: disable=broad-except
if capture:
print(f'Caught exception: {e}')
else:
raise e
return tracer
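# Minimal usage sketch (an illustration, not part of the library API): the
# default Interpreter fills in `param` sites from the parameter store, while
# Sample sites are left to an `inference_hook`, so this example uses a param.
def _example_try_sample():
  def model_fn():
    value = yield handlers.param(name='greeting')
    return value
  tracer = try_sample(model_fn, capture=True,
                      param_store=dict(greeting='hello'))
  return tracer.tape['greeting'].value  # == 'hello'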
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interpreter."""
from absl.testing import absltest
from cascades._src import handlers
from cascades._src import interpreter
from numpyro import distributions as np_dists
class InjectObservationHook(interpreter.InferenceHook):
"""A simple hook that injects observations into a `sample` call."""
def __init__(self, observed):
self._observed = observed
def handle_sample(self, effect, rng, stats):
effect.kwargs['rng'] = rng
if effect.name in self._observed:
effect.value = self._observed[effect.name]
else:
random_sample = handlers.dists.sample_distribution(
effect.fn, *effect.args, **effect.kwargs)
effect.value = random_sample.value
effect.score = random_sample.log_p
return effect
class AutonameTest(absltest.TestCase):
def test_observe(self):
def foo():
dist = np_dists.Normal()
a = yield handlers.sample(name='a', dist=dist)
b = yield handlers.sample(name='a', dist=dist)
c = yield handlers.sample(name='a', dist=dist)
return a + b + c
simple_hook = InjectObservationHook(observed={'a': 100, 'a/2': -100})
tracer = interpreter.Interpreter(foo, inference_hook=simple_hook)
tracer.run()
self.assertEqual(100, tracer['a'].value)
self.assertEqual(-100, tracer['a/2'].value)
class ParamTest(absltest.TestCase):
def test_param(self):
# Example of a simple parameter store
def param_test():
param_value = yield handlers.param(name='test_param')
return param_value
param = 'TEST PARAMETER'
tracer = interpreter.Interpreter(
param_test, param_store=dict(test_param=param))
tracer.run()
self.assertEqual(param, tracer.tape['test_param'].value)
class SamplesTest(absltest.TestCase):
def test_project(self):
full_data = [dict(a=1, b=17, c=-1), dict(a=2, b=88, c=-42)]
full_samples = interpreter.Samples(full_data[0].keys(), full_data)
projected_a = full_samples.project(['a'])
self.assertEqual(dict(a=1), projected_a.get_row(0))
self.assertEqual(dict(a=2), projected_a.get_row(1))
self.assertEqual(projected_a.size(), full_samples.size())
self.assertEqual(['a'], list(projected_a._variable_names.keys()))
projected_bc = full_samples.project(['b', 'c'])
self.assertEqual(projected_bc.size(), full_samples.size())
self.assertEqual(['b', 'c'], list(projected_bc._variable_names.keys()))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample values from a model."""
from functools import partial # pylint: disable=g-importing-member
from typing import Any, Dict, Text, Optional
from cascades._src import handlers as h
import jax
# TODO(ddohan): Add goal conditioning (future prompting) back in.
def build_tracer(
fn,
seed: Any, # h.RNGKey,
trace=(),
reparam_fn=None,
observed=None,
rescore=False) -> h.Record:
"""Make a tracer which samples values and records to a tape.
Args:
fn: Function to trace.
    seed: Random seed for the trace.
trace: Trace of site name -> Effect to replay values.
reparam_fn: Partial Reparam handler which can be used to reparameterize
distributions.
observed: Dictionary of name to observed value.
rescore: Whether to rescore the observed samples.
Returns:
New handler stack instantiated around the function.
"""
# TODO(ddohan): Allow context handlers within program.
# gen_fn = h.ApplyContext(fn)
gen_fn = fn
handler_stack = [
# Record effects to a tape
h.Record,
# Sample/observe distributions & await futures
h.StopOnReject,
h.Sampler,
partial(h.Observer, rescore=rescore, observed=observed),
      # Replay the known values when the generator is rerun.
      partial(
          h.Replay,
          trace=trace,
          replay_scores=True,
          assert_all_used=True),
      # Thread RNG through each effect.
      partial(h.Seed, seed=seed),
]
handler_fn = h.compose_handlers(handler_stack)
if reparam_fn is None:
reparam_fn = lambda x: x
record_handler: h.Record = handler_fn(reparam_fn(gen_fn))
return record_handler
class Sampler:
"""Run sampling on a model."""
def __init__(
self,
model, # pylint:disable=redefined-outer-name
# examples: Optional[Iterable[Dict[Text, Any]]] = None,
observe: Optional[Dict[Text, Any]] = None,
# future_prompt=False,
rescore=True,
reparam_fn=None,
):
self._model = model
# self._examples = examples or ()
self._observed = observe or dict()
# self._future_prompt = future_prompt
self._rescore = rescore
self._reparam_fn = reparam_fn
def build_tracer(self, seed=0):
"""Instantiate a tracer for given seed. Does not evalute concrete values."""
tracer = build_tracer(
self._model,
seed=seed,
# examples=self._examples,
observed=self._observed,
# future_prompt=self._future_prompt,
trace=(),
rescore=self._rescore,
reparam_fn=self._reparam_fn)
tracer.done = False
return tracer
def reify(self, seed=0):
tracer = self.build_tracer(seed=seed)
reify(tracer)
return tracer
def parallel(self, pool, seed, n):
"""Run function in parallel."""
seeds = jax.random.split(jax.random.PRNGKey(seed), n)
tracers = []
for seed in seeds:
tracer = self.build_tracer(seed=seed)
f = pool.submit(reify, tracer=tracer)
tracer.future = f
tracers.append(tracer)
return tracers
def reify(tracer: h.Record, verbose=False):
"""Run tracer and return the result.
Makes implicit values explicit.
Modifies `tracer` in place.
Args:
tracer: Tracer to run.
verbose: If true, log out effects.
Returns:
Result of running the program.
"""
it = tracer()
return_value = None
while True:
try:
eff = next(it)
if verbose:
print(eff)
except StopIteration as e:
if e.args is not None and len(e.args):
return_value = e.args[0]
if verbose:
        print(f'Return value: {return_value}')
break
tracer.done = True
tracer.return_value = return_value
return return_value
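# Minimal usage sketch (an illustration, not part of the library API; assumes
# numpyro, as used in the tests): build a tracer, run it to completion with
# `reify`, and read values back from the recorded tape.
def _example_sample_run(seed=0):
  import numpyro.distributions as np_dists  # Assumed import for the sketch.
  def model_fn():
    x = yield h.sample(name='x', dist=np_dists.Normal())
    return x
  tracer = build_tracer(model_fn, seed=seed)
  return_value = reify(tracer)
  return return_value, tracer['x'].score  # Sampled value and its log prob.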
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for base."""
from absl.testing import absltest
import cascades as cc
from cascades._src.inference import base
import numpyro.distributions as np_dists
@base.model
def bernoulli_model(p=0.5):
x = yield cc.sample(name='flip',
dist=np_dists.Bernoulli(probs=p))
return x
@base.model
def binomial_model(k, p=0.5):
total = 0
for i in range(k):
flip = yield bernoulli_model(p=p, name=str(i))
total += flip
return total
class BaseTest(absltest.TestCase):
def test_model_call_model(self):
"""Test that nesting models properly handles scopes."""
trace = binomial_model(k=3, p=0.5).sample()
for k in ['0/flip', '0/return_value',
'1/flip', '1/return_value',
'2/flip', '2/return_value',
'return_value']:
self.assertIn(k, trace.trace)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rejection_sampling."""
from absl.testing import absltest
import cascades as cc
from cascades._src.inference import rejection_sampling
from numpyro import distributions as np_dists
def truncated_2step_gaussian(cutoff):
"""Two step random walk with rejection criteria."""
step1 = yield cc.sample(name='step 1', dist=np_dists.Normal(loc=0, scale=1))
step2 = yield cc.sample(name='step 2', dist=np_dists.Normal(loc=0, scale=1))
output = step1 + step2
if abs(output) >= cutoff:
yield cc.reject('Out of bounds')
yield cc.log(output, 'output')
return output
def binomial(k, p=0.5):
total = 0
for i in range(k):
flip = yield cc.sample(name=f'flip{i}', dist=np_dists.Bernoulli(probs=p))
total += flip
yield cc.log(total, 'total')
  return total
class RejectionSamplingTest(absltest.TestCase):
def test_rejection_sample(self):
"""Check rejection sampling on a binomial distribution."""
k = 3
num_samples = 500
s = rejection_sampling.RejectionSampling(
model=binomial, k=k, p=0.5, max_attempts=100, observed=dict(total=1))
samples = s.sample(num_samples=num_samples, seed=0)
# Check that the total is always what was observed
total_sampled = samples.get_column('total')
for value in total_sampled:
self.assertEqual(1, value)
# Check that each flip has the correct distribution (should be 0.333)
total = 0.0
for flip_id in range(k):
column = samples.get_column(f'flip{flip_id}')
total += sum(column)
self.assertAlmostEqual(1.0 / k, total / (k * num_samples), places=1)
def test_sample_with_manual_reject(self):
"""Check sampling handles cc.reject events."""
cutoff = 0.5
sampler = rejection_sampling.RejectionSampling(
model=truncated_2step_gaussian, max_attempts=100, cutoff=cutoff,
observed=dict())
samples = sampler.sample(num_samples=10, seed=0)
result = samples.get_column('output')
for y in result:
# The cutoff is small enough that rejection should always succeed.
self.assertIsNotNone(y)
self.assertLess(abs(y), cutoff)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs an inferencer over all of the elements in a dataset."""
from cascades._src import interpreter
from cascades._src.distributions import base as dists
from cascades._src.inference import base
import jax
import tqdm
class DatasetSampler(base.Inferencer):
"""Runs an inferencer over all of the elements in a dataset."""
def __init__(self, inferencer_class, **kwargs):
self._inferencer_class = inferencer_class
self._kwargs = kwargs
def sample(self, dataset, num_samples=1, seed=0):
rng = dists.get_rng(seed)
samples = interpreter.Samples()
self.debug_info = []
for inputs in tqdm.tqdm(dataset):
rng, subrng = jax.random.split(rng)
these_kwargs = dict(self._kwargs)
these_kwargs.update(inputs)
inferencer = self._inferencer_class(**these_kwargs)
these_samples = inferencer.sample(seed=subrng, num_samples=num_samples)
samples.update(these_samples)
self.debug_info.append(inferencer.debug_info)
return samples
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance sampling test."""
from absl.testing import absltest
from cascades._src import handlers as h
from cascades._src.distributions import base
from cascades._src.inference import importance_sampling
import jax.numpy as jnp
import numpyro.distributions as dists
def _score_fn_max(sofar, value):
"""Use the value as its own score."""
del sofar
return value
def _random_seq(vocab_size=10, length=3, score_fn=_score_fn_max):
"""Generate random sequences. Use `score_fn` as likelihood for each choice."""
seq = []
for i in range(length):
choice = yield h.sample(
dists.Categorical(logits=jnp.zeros(vocab_size)), name=f'choice/{i}'
)
if score_fn is not None:
value_score = score_fn(sofar=seq, value=choice)
yield h.sample(dist=base.Factor(factor=value_score), obs=choice,
name=f'score/{i}')
seq.append(int(choice))
return seq
class TracerTest(absltest.TestCase):
"""Test that the beam search tracer runs at all."""
def test_tracer(self):
weighted_traces = importance_sampling.sequential_importance_sampling(
_random_seq, seed=0, nsamples=5, max_rounds=10
)
self.assertIsInstance(weighted_traces, list)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for forward_sampling."""
from absl.testing import absltest
import cascades as cc
from cascades._src.inference import forward_sampling
from numpyro import distributions as np_dists
def binomial(k, p=0.5):
total = 0
for i in range(k):
flip = yield cc.sample(name=f'flip{i}', dist=np_dists.Bernoulli(probs=p))
total += flip
yield cc.log(total, 'total')
  return total
class ForwardSamplingTest(absltest.TestCase):
def test_binomial(self):
k = 3
num_samples = 500
inferencer = forward_sampling.ForwardSampling(model=binomial, k=k, p=0.5)
samples = inferencer.sample(num_samples=num_samples, seed=0)
# Check that the mean of total matches the binomial mean.
total_sampled = samples.get_column('total')
mean_total = sum(total_sampled) / float(num_samples)
self.assertAlmostEqual(k / 2.0, mean_total, places=1)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference algorithms."""
from cascades._src.inference.base import model
from cascades._src.inference.dataset_sampler import DatasetSampler
from cascades._src.inference.forward_sampling import ForwardSampling
from cascades._src.inference.likelihood_weighting import LikelihoodWeighting
from cascades._src.inference.rejection_sampling import RejectionSampling
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Likelihood weighting inference algorithm."""
from typing import Any, Dict
from cascades._src import handlers
from cascades._src import interpreter
from cascades._src.distributions import base as dists
from cascades._src.inference import base
import jax
# TODO(charlessutton): Provide easy way to get marginal likelihood.
class LikelihoodWeightingHook(interpreter.InferenceHook):
"""InferenceHook for the interpreter to support likelihood weighting."""
def __init__(self, observed, await_timeout=60):
super().__init__()
self._observed = observed
self._observed_used: Dict[str, bool] = {}
self._await_timeout = await_timeout
# effect is either a Sample or an Observe.
# TODO(charlessutton): Should we handle logs?
def handle_observe(self, effect: Any, rng: Any, stats: interpreter.Stats):
del rng
observed_value = self._observed[effect.name]
# TODO(charlessutton): Maybe raise an error if not all observed vars used.
# (This could be on a `done` callback.)
self._observed_used[effect.name] = True
effect.value = observed_value
effect.score = handlers.dists.score_distribution(
effect.fn,
effect.value,
await_timeout=self._await_timeout)
stats.likelihood_observed += effect.score
return effect
def handle_sample(self, effect: Any, rng: Any, stats: interpreter.Stats):
# A Sample effect is equivalent to an Observe if the name is in
# self._observe.
if effect.name in self._observed:
return self.handle_observe(effect, rng, stats)
effect.kwargs['rng'] = rng
if effect.value is None:
random_sample = handlers.dists.sample_distribution(
effect.fn,
*effect.args,
await_timeout=self._await_timeout,
**effect.kwargs)
effect.value = random_sample.value
effect.score = random_sample.log_p
if effect.score is not None:
stats.likelihood_sampled += effect.score
return effect
class LikelihoodWeighting(base.Inferencer):
"""Draw samples via likelihood weighting.
  This is importance sampling where the proposal distribution is the prior
  distribution given by the cascade. Returns a weighted `Samples` collection.
"""
def __init__(self, model, *args, observed=None, **kwargs):
super().__init__(model, *args, **kwargs)
if observed is None:
raise ValueError('No variables specified to observe.')
self._observed = observed
def sample(self, num_samples=1, seed=0):
"""Generate a sample via likelihood weighting."""
rng = dists.get_rng(seed)
samples = interpreter.Samples()
self._reset_debug_info()
for _ in range(num_samples):
rng, subrng = jax.random.split(rng)
model = self._model(*self._args, **self._kwargs)
tracer = interpreter.Interpreter(
model,
seed=subrng,
inference_hook=LikelihoodWeightingHook(self._observed))
tracer.run()
self.record_sample(samples, tracer)
return samples
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam search on the observed likelihood of a program.
TODO(ddohan):
- Deduplicate this Tracer with forward sampler in core.Tracer.
- Allow parallel expansions using a threadpool.
- Replace Factor with ValueFunction(callable).
Consider separate score function:
- beam_search_by_score(score_fn=all_scores)
- beam_search_by_score(score_fn=last_score)
"""
import dataclasses
from typing import Any, Callable, Dict, Generator, List, Optional, Text
from cascades._src import handlers as h
import jax
import shortuuid
Trace = Dict[Text, h.Effect]
# yield, send, return types
TraceGenerator = Generator[h.Effect, Any, Any]
def run_until_observe(gen: TraceGenerator):
"""Run generator until next observe statement."""
while True:
effect = next(gen)
if isinstance(effect, h.Observe):
return effect
def clone(fn, trace: Trace, seed, param_handler=None):
"""Clone trace of function with a new seed.
Args:
fn: Function to trace.
trace: Trace of site name -> Effect
seed: Seed for clone of trace.
param_handler: Effect handler to map parameter names to values.
Returns:
New handler stack instantiated around the function.
"""
# Allow context handlers within program.
handler_stack = [
# Record effects to a tape
h.Record,
param_handler,
# Sample/observe distributions & await futures
h.Sampler,
h.Observer,
]
handler_fn = h.compose_handlers(handler_stack)
handlers: h.Record = handler_fn(
# Replace the known values when generator is rerun
h.Replay(
# Thread RNG through effect.
gen_fn=h.Seed(seed=seed, gen_fn=fn),
trace=trace,
replay_scores=True,
assert_all_used=True,
)
)
return handlers
@dataclasses.dataclass
class Tracer:
"""Track state of tracers."""
handlers: h.Record
fn: Callable[[Any], Any]
gen: TraceGenerator
seed: Any
parent: Optional[Any] = dataclasses.field(repr=False, default=None)
result: Optional[Any] = None
done: bool = False
uid: Text = dataclasses.field(
repr=True, default_factory=lambda: shortuuid.uuid()[:8])
@classmethod
def new(cls, fn, seed, param_handler=None):
handlers = clone(
fn=fn, trace=dict(), seed=seed, param_handler=param_handler)
return Tracer(handlers=handlers, fn=fn, gen=handlers(), seed=seed)
def __hash__(self):
return hash(self.uid)
@property
def trace(self):
"""Get traces."""
return self.handlers.trace
def reify(self):
"""Run tracer to completion."""
while not self.done:
self.run_until_observe()
def clone(self, seed, fastforward=True):
"""Make a clone of the tracer.
Args:
seed: Seed for the tracer. Only applies past the end of the copied trace.
This is because the sites copied from the parent tracer are injected
from the replayed trace. Only locations not included in the original
trace make use of the new RNG seed.
fastforward: if True, then fastforward the tracer to end of existing trace
before returning.
Returns:
Cloned Tracer instance.
"""
handlers = clone(fn=self.fn, trace=self.trace, seed=seed)
tracer = Tracer(
handlers=handlers, fn=self.fn, gen=handlers(), seed=seed, parent=self)
if fastforward:
# Fastforward clone to same location.
for _ in range(len(self.handlers.trace)):
next(tracer.gen)
return tracer
def run_until_observe(self):
try:
return run_until_observe(self.gen)
except StopIteration as e:
if e.args is not None and len(e.args):
self.result = e.args[0]
self.done = True
def expand(tracer: Tracer, k: int) -> List[Tracer]:
"""Expand a tracer to `k` children.
Given tracer is cloned `k-1` times and then it and its children are run until
next observe.
Args:
tracer: Tracer to expand.
k: Number of expansions of the given tracer.
Returns:
List of tracers consisting of given tracer and its k-1 children, all
advanced for 1 step.
"""
seed = tracer.seed
if isinstance(seed, int):
seed = jax.random.PRNGKey(seed)
seeds = jax.random.split(seed, num=k - 1)
tracers = [tracer]
for seed in seeds:
# Clone the tracer k-1 times and fastforward each to same location
child = tracer.clone(seed=seed, fastforward=True)
tracers.append(child)
for tracer in tracers:
tracer.run_until_observe()
return tracers
def score_fn_last(tracer):
record: h.Record = tracer.handlers
most_recent_key: Text = record.keys[-1]
most_recent: h.Effect = record.trace[most_recent_key]
if not isinstance(most_recent, h.Observe):
    raise ValueError(
        '`score_fn_last` should be called immediately after an Observe')
return -most_recent.score
def score_fn_all(tracer):
return -tracer.handlers.observed_likelihood
def beam_search_by_score(fn,
seed,
score_fn=score_fn_last,
nbeams=5,
expansion_factor=3,
max_rounds=20,
return_intermediates=False):
"""Beam search on a function prioritized by score_fn.
TODO(ddohan): Reconsider how to handle completed traces when some may
terminate earlier than others.
Args:
fn: Cascade function to run.
seed: Seed for this run.
    score_fn: Method used to prioritize beams. `score_fn_last` prioritizes by
      the last Observe and is appropriate for cost-to-go value functions,
      while `score_fn_all` prioritizes by the sum of observed likelihoods and
      is appropriate for e.g. sampling from a language model using the sum of
      likelihoods as the score.
nbeams: Number of beams in active set.
expansion_factor: Number of expansions per beam in each step.
max_rounds: Maximum length of trajectories. Used to prevent runaway
generations.
return_intermediates: If True, return a list of intermediate tracers.
Returns:
If `return_intermediates` is false, list of highest scoring tracers. Else,
return dict with `intermediates` set and `completed` tracers.
"""
completed_traces = []
intermediates = set()
seeds = jax.random.split(jax.random.PRNGKey(seed), nbeams)
active_traces = [Tracer.new(fn=fn, seed=seed) for seed in seeds]
def step(tracers):
expansions = []
for root in tracers:
root_expansions = expand(root, k=expansion_factor)
expansions.extend(root_expansions)
return sorted(expansions, key=score_fn)
for _ in range(max_rounds):
if len(completed_traces) >= nbeams:
if return_intermediates:
return dict(intermediates=intermediates, completed=completed_traces)
else:
return completed_traces
expansions = step(active_traces)
    if return_intermediates:
      intermediates.update(expansions)
active_traces = []
for tracer in expansions:
if len(completed_traces) >= nbeams:
break
if tracer.done:
completed_traces.append(tracer)
else:
if len(active_traces) >= nbeams:
break
# TODO(ddohan): Consider adding additional clones if we run out of
# pending traces before getting `nbeams` active traces.
        active_traces.append(tracer)
  # Exhausted `max_rounds` before collecting `nbeams` completed traces;
  # return whatever has completed so far.
  if return_intermediates:
    return dict(intermediates=intermediates, completed=completed_traces)
  return completed_traces
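# Minimal usage sketch (an illustration, not part of the library API; assumes
# numpyro and the Factor distribution, mirroring the beam search tests): each
# choice is followed by a Factor observe whose score prioritizes the beams.
def _example_beam_search(seed=0):
  import jax.numpy as jnp  # Assumed imports for the sketch.
  import numpyro.distributions as np_dists
  from cascades._src.distributions import base
  def seq_model(length=3, vocab_size=5):
    seq = []
    for i in range(length):
      choice = yield h.sample(
          np_dists.Categorical(logits=jnp.zeros(vocab_size)),
          name=f'choice/{i}')
      # Use the sampled token value itself as the beam score.
      yield h.sample(dist=base.Factor(factor=choice), obs=choice,
                     name=f'score/{i}')
      seq.append(int(choice))
    return seq
  return beam_search_by_score(
      seq_model, seed=seed, nbeams=2, expansion_factor=3, max_rounds=10)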
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_search."""
from absl.testing import absltest
from cascades._src import handlers as h
from cascades._src.distributions import base
from cascades._src.inference import beam_search
import jax.numpy as jnp
import numpyro.distributions as dists
def _score_fn_max(sofar, value):
"""Use the value as its own score."""
del sofar
return value
def _random_seq(vocab_size=10, length=3, score_fn=_score_fn_max):
"""Generate random sequences. Use `score_fn` as likelihood for each choice."""
seq = []
for i in range(length):
choice = yield h.sample(
dists.Categorical(logits=jnp.zeros(vocab_size)), name=f'choice/{i}'
)
if score_fn is not None:
value_score = score_fn(sofar=seq, value=choice)
yield h.sample(dist=base.Factor(factor=value_score), obs=choice,
name=f'score/{i}')
seq.append(int(choice))
return seq
class TracerTest(absltest.TestCase):
"""Test that the beam search tracer runs at all."""
def test_tracer(self):
t = beam_search.Tracer.new(fn=_random_seq, seed=0)
t.reify()
self.assertIsInstance(t.result, list)
class BeamSearchTest(absltest.TestCase):
"""Test that beam search runs."""
def _run_beam_search(self,
score_fn,
nbeams=3,
length=4,
factor=5,
intermediates=True):
result = beam_search.beam_search_by_score(
lambda: h.AutoName(_random_seq(length=length))(), # pylint: disable=unnecessary-lambda
seed=0,
score_fn=score_fn,
nbeams=nbeams,
expansion_factor=factor,
return_intermediates=intermediates,
)
# At each observe + (1 for the run until end),
# each of nbeams is cloned (factor - 1) times.
# Add in the original nbeams
expected = nbeams * (factor - 1) * (length + 1) + nbeams
if intermediates:
self.assertLen(result['intermediates'], expected)
completed = result['completed']
else:
completed = result
self.assertLen(completed, nbeams)
return result
def test_beam_search(self, nbeams=3, length=4, factor=5):
self._run_beam_search(
score_fn=beam_search.score_fn_last,
nbeams=nbeams,
length=length,
factor=factor)
def test_beam_search_all_scores(self, nbeams=3, length=4, factor=5):
self._run_beam_search(
score_fn=beam_search.score_fn_all,
nbeams=nbeams,
length=length,
factor=factor)
# def test_score_fns(self, nbeams=1, length=5, factor=5):
# all_beams = self._run_beam_search(
# score_fn=beam_search.score_fn_all,
# nbeams=nbeams,
# length=length,
# factor=factor,
# intermediates=False)
# last_beams = self._run_beam_search(
# score_fn=beam_search.score_fn_last,
# nbeams=nbeams,
# length=length,
# factor=factor,
# intermediates=False)
# print(all_beams[0].handlers.observed_likelihood)
# print(last_beams[0].handlers.observed_likelihood)
# # Note: Probabilistically true, but not strictly true in all worlds.
# self.assertGreater(all_beams[0].handlers.observed_likelihood,
# last_beams[0].handlers.observed_likelihood)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rejection sampling from a cascades model."""
import math
from typing import Any, Dict, Optional
from cascades._src import handlers
from cascades._src import interpreter
from cascades._src.distributions import base as dists
from cascades._src.inference import base
import jax
class RejectionSamplingHook(interpreter.InferenceHook):
"""Inference hook to allow interpreter to do rejection sampling."""
def __init__(self, observed, await_timeout=60):
super().__init__()
self._observed = observed
self._observed_used = dict()
self._await_timeout = await_timeout
# effect is either a Sample or an Observe.
# TODO(charlessutton): Should we handle logs?
def handle_observe(self, effect: Any, rng: Any, stats: interpreter.Stats):
observed_value = self._observed[effect.name]
# Generate the putative sample.
effect.kwargs['rng'] = rng
random_sampler = handlers.dists.sample_distribution(
effect.fn,
*effect.args,
await_timeout=self._await_timeout,
**effect.kwargs)
effect.value = random_sampler.value
if observed_value != random_sampler.value:
# Reject. Sample did not match the observation.
effect.score = -jax.numpy.inf
stats.likelihood_observed = -jax.numpy.inf
else:
# We did match, so we are allowed to continue.
effect.score = random_sampler.log_p
stats.likelihood_observed += effect.score
return effect
def handle_sample(self, effect: Any, rng: Any, stats: interpreter.Stats):
# A Sample effect is equivalent to an Observe if the name is in
# self._observe.
if effect.name in self._observed:
return self.handle_observe(effect, rng, stats)
# This wasn't an observe. Just sample the value.
effect.kwargs['rng'] = rng
random_sample = handlers.dists.sample_distribution(
effect.fn,
*effect.args,
await_timeout=self._await_timeout,
**effect.kwargs)
effect.value = random_sample.value
effect.score = random_sample.log_p
if effect.score is not None:
stats.likelihood_sampled += effect.score
return effect
def handle_log(self, effect: Any, rng: Any, stats: interpreter.Stats):
# A Log effect is equivalent to an Observe if the name is in
# self._observe.
if effect.name in self._observed:
return self.handle_observe(effect, rng, stats)
else:
return effect
class RejectionSampling(base.Inferencer):
"""Draw samples via rejection sampling."""
def __init__(self,
model,
*args,
max_attempts: int = 20,
observed: Optional[Dict[str, Any]] = None,
**kwargs):
"""Instantiate a rejection sampler for given model.
Args:
model: Model to run sampling on.
*args: Arguments passed to the model.
max_attempts: Maximum samples to attempt.
observed: Values to condition the model on.
**kwargs: Keyword args passed to the model.
Returns:
A sample from the model.
"""
super().__init__(model, *args, **kwargs)
if observed is None:
raise ValueError('No variables specified to observe.')
self._observed = observed
self._max_attempts = max_attempts
def sample(self, num_samples=1, seed=0):
"""Generate a sample via rejection sampling."""
rng = dists.get_rng(seed)
samples = interpreter.Samples()
for sample_idx in range(num_samples):
for attempt_idx in range(self._max_attempts):
rng, subrng = jax.random.split(rng)
model = self._model(*self._args, **self._kwargs)
tracer = interpreter.Interpreter(
model,
seed=subrng,
inference_hook=RejectionSamplingHook(self._observed))
tracer.run()
success = not math.isinf(tracer.stats.likelihood_observed)
        if success:
          break
if success:
self.record_sample(samples, tracer)
samples.update_row(dict(num_attempts=1 + attempt_idx))
else:
# Rejection sampling failed. Add N/A values for everything.
samples.append_row(
dict(sample_idx=sample_idx, num_attempts=1 + attempt_idx))
return samples
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance sampling variants."""
from cascades._src.inference import beam_search
import jax
Tracer = beam_search.Tracer
def _normalize_weights(tracers):
weights = jax.numpy.array(
[tracer.handlers.observed_likelihood for tracer in tracers])
weight_sum = jax.scipy.special.logsumexp(weights)
normalized_weights = [w - weight_sum for w in weights]
return normalized_weights
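# Worked example (an illustration, not part of the library API): for log
# weights [log 1, log 3], logsumexp gives log 4, so the normalized log
# weights are [log(1/4), log(3/4)].
def _example_normalize_weights():
  import math
  import types
  fake = lambda ll: types.SimpleNamespace(  # Stand-in for a Tracer.
      handlers=types.SimpleNamespace(observed_likelihood=ll))
  tracers = [fake(math.log(1.0)), fake(math.log(3.0))]
  return _normalize_weights(tracers)  # approx. [log 0.25, log 0.75]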
def sequential_importance_sampling(fn, seed, nsamples=5, max_rounds=20):
"""Sequential importance sampling.
Args:
fn: Cascade function to run.
seed: Seed for this run.
nsamples: Number of beams in active set.
max_rounds: Maximum length of trajectories. Used to prevent runaway
generations.
Returns:
List of (weight, traces)
"""
completed_traces = []
seeds = jax.random.split(jax.random.PRNGKey(seed), nsamples)
active_traces = [Tracer.new(fn=fn, seed=seed) for seed in seeds]
for _ in range(max_rounds):
new_active_traces = []
for tracer in active_traces:
tracer.run_until_observe()
if tracer.done:
completed_traces.append(tracer)
else:
new_active_traces.append(tracer)
active_traces = new_active_traces
if not active_traces:
break
normalized_weights = _normalize_weights(completed_traces)
return list(zip(normalized_weights, completed_traces))
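# --- Hedged usage sketch (editor addition). Assumes `fn` is a cascades
# generator function whose observes mark round boundaries; picks the completed
# trace with the highest normalized log-weight.
def _example_sequential_importance_sampling(fn):
  weighted = sequential_importance_sampling(fn, seed=0, nsamples=8)
  best_log_weight, best_trace = max(weighted, key=lambda wt: wt[0])
  return best_log_weight, best_trace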
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for likelihood_weighting."""
import math
from absl.testing import absltest
import cascades as cc
from cascades._src.inference import likelihood_weighting
from numpyro import distributions as np_dists
def beta_binomial():
pi = yield cc.sample(
name='pi', dist=np_dists.Beta(concentration0=2.0, concentration1=1.0))
y = yield cc.sample(name='y', dist=np_dists.Bernoulli(probs=pi))
return y
class LikelihoodWeightingTest(absltest.TestCase):
def test_binomial_marginal_likelihood(self):
num_samples = 100
inferencer = likelihood_weighting.LikelihoodWeighting(
beta_binomial, observed=dict(y=1))
samples = inferencer.sample(num_samples=num_samples, seed=0)
log_probs = samples.get_column('_likelihood_observed')
probs = [math.exp(lp) for lp in log_probs]
marginal_probability = sum(probs) / float(num_samples)
self.assertAlmostEqual(1.0 / 3.0, marginal_probability, places=1)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base types for inference."""
from concurrent import futures
import functools
from typing import Any, Dict, Optional, Sequence
from cascades._src import handlers
from cascades._src import interpreter
from cascades._src import sampler as sampler_lib
import jax
class Inferencer():
"""Base class for inference algorithms."""
_model: Any  # Really Callable[..., Generator[X, Y, Z]], but not typed that way yet.
_args: Sequence[Any]
_kwargs: Dict[str, Any]
def __init__(self, model, *args, **kwargs): # pylint: disable=redefined-outer-name
self._model = model
self._args = args
self._kwargs = kwargs
self._reset_debug_info()
def _reset_debug_info(self):
self._tapes = []
self.debug_info = dict()
def record_sample(self, samples, tracer, **kwargs):
datum = interpreter.tape_to_dict(tracer.tape, tracer.stats)
samples.append_row(datum)
samples.update_row(self._kwargs)
samples.update_row(dict(**kwargs))
self._tapes.append(tracer.tape)
self.debug_info['tapes'] = self._tapes
def wrap_with_return_logging(fn):
"""Wrap a model function such that the return value is logged."""
@functools.wraps(fn)
def wrapped_fn(*args, **kwargs):
ret_val = yield from fn(*args, **kwargs)
yield handlers.log(value=ret_val, name='return_value')
return ret_val
return wrapped_fn
def model(fn):
"""Decorator which wraps model around a function which creates a generator.
The output of sampling from the model is included in the trace as
`return_value`.
```
@model
def sampling_fn(k=3, p=0.6):
output = yield Binomial(k=k, p=p)
return output
# Use default arguments.
sampling_fn.sample(seed=1)
# Apply nondefault argument, then sample.
sampling_fn(k=5, p=0.9).sample(seed=1)
```
Args:
fn: Generator function to wrap into a sampleable model.
Returns:
Function wrapped into a Model. May directly call `.sample(seed)`, or apply
new arguments then sample: `ret_model(*args, **kwargs).sample(seed)`
"""
partial_model = functools.partial(SampledModel, wrap_with_return_logging(fn))
def map_kwargs(pool, seed, kwargs_list):
"""Map model sampling across a list of inputs."""
tracers = []
for kwargs in kwargs_list:
configured_model = partial_model(**kwargs)
tracer = configured_model.sample(pool=pool, seed=seed)
tracer.kwargs = kwargs
tracers.append(tracer)
return tracers
def sample(seed: int = 0, pool: Optional[futures.ThreadPoolExecutor] = None):
return partial_model().sample(seed=seed, pool=pool)
def sample_parallel(pool: futures.ThreadPoolExecutor,
seed: int = 0,
n: int = 1):
return partial_model().sample_parallel(seed=seed, pool=pool, n=n)
partial_model.map = map_kwargs
partial_model.sample = sample
partial_model.sample_parallel = sample_parallel
partial_model.__name__ = fn.__name__
return partial_model
class SampledModel(handlers.BaseModel):
"""Base class for sampling from cascade models."""
def sample(
self,
seed: int = 0,
pool: Optional[futures.ThreadPoolExecutor] = None) -> handlers.Record:
"""Sample a trace from the model.
Args:
seed: Random seed.
pool: Optional threadpool for parallel execution.
Returns:
a Record tracer.
"""
sampler = sampler_lib.Sampler(
model=functools.partial(self._model, *self._args, **self._kwargs),
observe=self._observe)
if pool:
tracer = sampler.build_tracer(seed)
f = pool.submit(sampler_lib.reify, tracer=tracer)
tracer.future = f
else:
tracer = sampler.reify(seed=seed)
return tracer
def sample_parallel(self,
pool: futures.ThreadPoolExecutor,
seed: int = 0,
n: int = 1):
"""Sample `n` tracers in parallel."""
seeds = jax.random.split(jax.random.PRNGKey(seed), n)
tracers = [self.sample(seed=seed, pool=pool) for seed in seeds]
return tracers
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple forward sampling from a model."""
from typing import Any
from cascades._src import handlers
from cascades._src import interpreter
from cascades._src.distributions import base as dists
from cascades._src.inference import base
import jax
class ForwardSamplingHook(interpreter.InferenceHook):
"""Inference hook to support simple forward sampling."""
def handle_observe(self, effect: Any, rng: Any, stats: interpreter.Stats):
del rng, stats
# Silently return the event. This will ensure that the observed value
# is clamped to what it is supposed to be.
# TODO(charlessutton): Might need to handle rescore observed.
return effect
def handle_sample(self, effect: Any, rng: Any, stats: interpreter.Stats):
effect.kwargs['rng'] = rng
if effect.value is None:
random_sample = handlers.dists.sample_distribution(
effect.fn,
*effect.args,
await_timeout=self._await_timeout,
**effect.kwargs)
effect.value = random_sample.value
effect.score = random_sample.log_p
if effect.score is not None:
stats.likelihood_sampled += effect.score
return effect
class ForwardSampling(base.Inferencer):
"""Draw samples via forward sampling.
This class does not allow or handle observations.
"""
def sample(self, num_samples=1, seed=0):
"""Generate a sample via forward sampling."""
rng = dists.get_rng(seed)
samples = interpreter.Samples()
self._reset_debug_info()
for sample_idx in range(num_samples):
rng, subrng = jax.random.split(rng)
model = self._model(*self._args, **self._kwargs)
tracer = interpreter.Interpreter(
model, seed=subrng, inference_hook=ForwardSamplingHook())
tracer.run()
self.record_sample(samples, tracer, sample_idx=sample_idx)
return samples
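# --- Hedged usage sketch (editor addition). The coin model and the
# `cascades as cc` import follow the test conventions in this repo;
# ForwardSampling simply runs the model and records each trace.
def _example_forward_sampling():
  import cascades as cc
  from numpyro import distributions as np_dists

  def coin():
    y = yield cc.sample(name='y', dist=np_dists.Bernoulli(probs=0.5))
    return y

  return ForwardSampling(coin).sample(num_samples=3, seed=0)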
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for base distributions."""
import dataclasses
import random
from absl.testing import absltest
from cascades._src.distributions import base
@dataclasses.dataclass(eq=True, frozen=True)
class RandomFactor(base.Distribution):
"""Randomized likelihood for testing purposes."""
def sample(self, rng):
del rng
return base.RandomSample(value=None, log_p=self.score(None))
def score(self, value):
del value
return random.randint(0, 100_000_000)
class BaseTest(absltest.TestCase):
def test_lambda(self):
fn = lambda: 5
dist = base.Lambda(fn=fn)
sample = dist.sample(0)
self.assertEqual(5, sample.value)
def test_mem_lambda(self):
"""Test memoizing a lambda distribution."""
fn = lambda: random.randint(0, 100_000_000)
dist = base.Lambda(fn=fn)
dist = base.Mem(dist=dist)
v1 = dist.sample(0).value
v2 = dist.sample(0).value
v3 = dist.sample(1).value
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
def test_mem_sample_and_score(self):
"""Test memoizing a randomized sample & score distribution."""
dist = RandomFactor()
dist = base.Mem(dist=dist)
v1 = dist.sample(0).score
v2 = dist.sample(0).score
v3 = dist.sample(1).score
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
v1 = dist.score('abc')
v2 = dist.score('abc')
v3 = dist.score('xyz')
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Choose k items from a set."""
import dataclasses
from typing import Any, List, Tuple, Union
from cascades._src.distributions import base
import jax
import jax.numpy as jnp
def _sample_log_probs(rng, log_probs, k=None):
# Sample categorical distribution of log probs
noise = jax.random.gumbel(rng, shape=log_probs.shape)
perturbed = log_probs + noise
if k is None:
return jnp.argmax(perturbed, axis=-1)
else:
  # Gumbel top-k: take the indices of the k *largest* perturbed values.
  # (argsort is ascending, so reverse before slicing.)
  return jnp.argsort(perturbed, axis=-1)[::-1][:k]
@dataclasses.dataclass(frozen=True, eq=True)
class Choose(base.Distribution):
"""Choose k of n options. Uses gumbel top-k trick."""
k: int = 1
options: Union[Tuple[Any], List[Any]] = tuple() # pytype: disable=annotation-type-mismatch
def sample(self, rng) -> base.RandomSample:
rng = base.get_rng(rng)
n = len(self.options)
log_p = jnp.log(1.0 / n)
log_probs = jnp.full((n,), fill_value=log_p)
chosen_idxs = _sample_log_probs(rng, log_probs=log_probs, k=self.k)
chosen = []
for idx in chosen_idxs:
chosen.append(self.options[idx])
return_value = base.RandomSample(value=chosen, log_p=log_p * self.k)
return return_value
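# --- Hedged usage sketch (editor addition): draw 2 of 4 options with a jax
# PRNG key. `sample.value` is a list of two distinct options; `sample.log_p`
# is the distribution's (approximate) joint log-probability.
def _example_choose():
  rng = jax.random.PRNGKey(0)
  dist = Choose(k=2, options=('a', 'b', 'c', 'd'))
  sample = dist.sample(rng)
  return sample.value, sample.log_p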
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for string distributions."""
from absl.testing import absltest
from cascades._src import handlers as h
from cascades._src import sampler
from cascades._src.distributions import strings
class StringsTest(absltest.TestCase):
def test_mock_lm(self):
def fn():
a = yield h.sample(
strings.String(lm=strings.mock_lm('ABC'), until=None, k=2), name='a')
b = yield h.sample(
strings.String(lm=strings.mock_lm('XYZ'), until=None, k=1), name='b')
return a + b
s = sampler.Sampler(fn)
trace = s.reify(seed=0)
self.assertEqual('ABCABCXYZ', trace.return_value)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic distribution types and helpers."""
from concurrent import futures
import dataclasses
import functools
import math
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import cachetools
import jax
import jax.numpy as jnp
import numpy as np
from numpyro import distributions as np_dists
import shortuuid
DEFAULT_TIMEOUT = 60
## Base Distribution and RandomSample types.
@dataclasses.dataclass(eq=True, frozen=True)
class Distribution:
"""Each Distribution implements at least `sample` and `score`."""
# stand in for kw_only in py3.10
capture: Any = dataclasses.field(repr=False, default=None)
name: Any = dataclasses.field(repr=True, default=None)
observed: Any = dataclasses.field(repr=True, default=None)
def __post_init__(self):
if self.capture is not None:
raise ValueError('Use kwargs for all arguments.')
def sample(self, rng):
"""Draw a sample from the distribution."""
raise NotImplementedError
def prob(self, value):
"""Score a sample by probability under the distribution."""
raise NotImplementedError
def log_prob(self, value):
"""Score a sample by log probability under the distribution."""
return jax.numpy.log(self.prob(value))
def score(self, value):
"""Score sample. Default to `log_prob` but may be any unnormalized value."""
return self.log_prob(value)
def support(self):
"""Possible values which can be drawn from the distribution.
Not easily defined for all distributions.
"""
raise NotImplementedError
@dataclasses.dataclass(eq=True, frozen=True)
class Unbatched(Distribution):
"""Marker that a sample has been unbatched from an underlying distribution."""
dist: Any = None
idx: Optional[int] = None # Index of the item within the batch.
# TODO(ddohan): Can it both be frozen & do post_init casting?
@dataclasses.dataclass(frozen=True, eq=True)
class RandomSample:
"""A value with an optional score (log_p) and source distribution."""
# stand in for kw_only in py3.10
capture: Any = dataclasses.field(repr=False, default=None)
log_p: Optional[Union[float, jax.Array]] = None
value: Any = None
dist: Optional[Union[Distribution, functools.partial]] = None
def __post_init__(self):
if self.capture is not None:
raise ValueError('Use kwargs for all arguments.')
def unbatch_(self):
"""Unpack a RandomSample of a list to a list of RandomSamples."""
unbatched = []
for (log_p, value) in zip(self.log_p, self.value):
sample = RandomSample(
capture=None,
log_p=log_p,
value=value,
dist=Unbatched(dist=self.dist))
unbatched.append(sample)
return unbatched
@property
def score(self):
"""Potentially unnormalized score. Defaults to log_p."""
return self.log_p
## Helper methods
# TODO(ddohan): Consider moving get_rng to separate file.
def get_rng(seed):
"""Get jax prng key from seed. Does nothing if seed is already a PRNGKey."""
if isinstance(seed, int):
return jax.random.PRNGKey(seed)
# Seed is already a jax.random.PRNGKey, so we just pass it through.
return seed
def score_distribution(fn: Union[Distribution, np_dists.Distribution],
value: Any,
await_timeout: Optional[int] = None):
"""Score value under distribution.
Args:
fn: Object which defines a `score` or `log_prob` method.
value: Value to score.
await_timeout: Length of time to wait. Defaults to DEFAULT_TIMEOUT.
Returns:
Float likelihood of value under distribution.
"""
if hasattr(fn, 'score'):
score_fn = fn.score
elif hasattr(fn, 'log_prob'):
score_fn = fn.log_prob
else:
raise ValueError('Must define a `score` or `log_prob` method.')
if not callable(score_fn):
raise ValueError(f'Score method {score_fn} is not callable on {fn}')
score = score_fn(value)
if isinstance(score, futures.Future):
score = score.result(timeout=await_timeout or DEFAULT_TIMEOUT)
return score
def sample_distribution(fn, *args, await_timeout=None, **kwargs):
"""Sample value from function or distribution.
If `fn` comes from third party distribution library (e.g tf distributions or
NumPyro), then sample using library specific method. Otherwise, if fn is
callable, draw a sample by calling fn.
Args:
fn: Callable or object which defines a `score` or `log_prob` method.
*args: Args passed to sample fn.
await_timeout: Length of time to wait. Defaults to DEFAULT_TIMEOUT.
**kwargs: Kwargs passed to sample fn.
Returns:
Sampled value, with (potentially unnormalized) likelihood.
"""
if isinstance(fn, np_dists.Distribution):
# Numpyro distributions.
key = kwargs['rng']
kwargs['key'] = key
del kwargs['rng']
value = fn.sample(*args, **kwargs)
log_p = fn.log_prob(value)
return RandomSample(value=value, log_p=log_p)
elif hasattr(fn, 'sample') and callable(fn.sample):
fn = fn.sample
random_sample = fn(*args, **kwargs)
if isinstance(random_sample, futures.Future):
random_sample = random_sample.result(
timeout=await_timeout or DEFAULT_TIMEOUT)
if not isinstance(random_sample, RandomSample):
raise ValueError(
f'Expected sample to return RandomSample. Got {random_sample}')
return random_sample
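# --- Hedged usage sketch (editor addition): numpyro distributions take a
# `key=` kwarg, which `sample_distribution` fills from the `rng` kwarg.
def _example_sample_distribution():
  rng = get_rng(0)
  random_sample = sample_distribution(np_dists.Normal(0.0, 1.0), rng=rng)
  return random_sample.value, random_sample.log_p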
## Factor distributions.
# TODO(ddohan): Consider switching sentinel to 0-dim array
class FactorSentinel:
pass
@dataclasses.dataclass(eq=True, frozen=True)
class Factor(Distribution):
"""Add given `score` to observed likelihood.
Used to manually add terms to model likelihood. Always returns the given
`score` for `observe`.
"""
factor: float = 0.0
reason: Any = None
def sample(self, rng):
del rng
return RandomSample(value=FactorSentinel(), log_p=self.factor)
def score(self, value):
del value
return self.factor
@dataclasses.dataclass(eq=True, frozen=True)
class Constant(Distribution):
"""Return a constant, with fixed log_prob factor."""
value: Any = None
factor: float = 0.0 # Constant to use as score
def sample(self, rng):
del rng
return RandomSample(value=self.value, log_p=self.factor)
def score(self, value):
if value == self.value:
return self.factor
else:
return -math.inf
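# --- Hedged usage sketch (editor addition): `Factor` injects a fixed term
# into the observed likelihood, while `Constant` pins a value and scores
# anything else as -inf.
def _example_factor_and_constant():
  penalty = Factor(factor=-2.0, reason='length penalty')
  assert penalty.score('anything') == -2.0
  const = Constant(value='yes')
  assert const.sample(rng=None).value == 'yes'
  assert const.score('no') == -math.inf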
## Discrete distributions
@dataclasses.dataclass(frozen=True, eq=True)
class UniformCategorical(Distribution):
"""Return a random choice from the distribution."""
options: Tuple[Any] = tuple() # pytype: disable=annotation-type-mismatch
def sample(self, rng=None) -> RandomSample:
idx = jax.random.randint(rng, (), 0, len(self.options))
idx = int(idx)
sample = self.options[idx]
log_p = -jnp.log(len(self.options))
return_value = RandomSample(value=sample, log_p=log_p)
return return_value
def log_prob(self, value):
del value
# TODO(ddohan): Assert that sample is one of the options
log_p = -jnp.log(len(self.options))
return log_p
def support(self):
return self.options
# Higher order distributons
def _rng_hash(rng):
"""Hash an rng key."""
if isinstance(rng, int):
return hash(rng)
else:
return hash(tuple(np.asarray(rng)))
def _mem_sample_key(self, rng):
"""Cache key for Mem distribution."""
rng = _rng_hash(rng)
h = hash((rng, self))
return h
def _mem_score_key(self, value):
"""Cache key for Mem distribution."""
h = hash((value, self))
return h
# TODO(ddohan): Consider sharing cache across instances.
@dataclasses.dataclass(frozen=True, eq=True)
class Mem(Distribution):
"""Add a cache to a distribution so that repeated calls are memoized."""
dist: Optional[Distribution] = dataclasses.field(repr=True, default=None)
# Mem should not be equal unless they are really the same
# generate a unique UID to ensure this property.
uid: str = dataclasses.field(
repr=True, default_factory=lambda: shortuuid.uuid()[:8])
@cachetools.cached(cache=cachetools.LRUCache(maxsize=100_000),
key=_mem_sample_key)
def sample(self, rng):
return self.dist.sample(rng=rng)
@cachetools.cached(cache=cachetools.LRUCache(maxsize=100_000),
key=_mem_score_key)
def score(self, value):
return self.dist.score(value)
@dataclasses.dataclass(frozen=True, eq=True)
class Lambda(Distribution):
"""Wrap a function as distribution."""
fn: Any = dataclasses.field(default=None, hash=None)
args: Optional[Iterable[Any]] = None
kwargs: Optional[Dict[str, Any]] = None
def sample(self, rng):
del rng
args = self.args or []
kwargs = self.kwargs or {}
value = self.fn(*args, **kwargs)
return RandomSample(value=value, log_p=0.0)
def score(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
raise NotImplementedError('Scoring from Lambda is not available.')
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""String distributions."""
from concurrent import futures
import dataclasses
import math
from typing import Any, Text
from cascades._src.distributions import base as dists
DEFAULT_LM_CLIENT = dict(default=None)
def set_default_lm(lm):
"""Set the default language model client."""
DEFAULT_LM_CLIENT['default'] = lm
def get_default_lm():
"""Return a default language model client."""
lm = DEFAULT_LM_CLIENT.get('default')
if not lm:
raise ValueError('No default LM set.')
return lm
def removeafter(text: Text, token: Text = '==='):
  """Truncate `text` just after the first occurrence of `token`.

  Returns None if `token` is not present.
  """
  idx = text.find(token)
  if idx >= 0:
    return text[:idx + len(token)]
  else:
    return None
def mock_lm(response: Text) -> ...:
"""Return Distribution class which will return a fixed response."""
@dataclasses.dataclass(frozen=True)
class MockLM(dists.Distribution):
prompt: Text = ''
def sample(self, rng):
del rng
return dists.RandomSample(value=response, log_p=-1.0)
def prob(self, value):
return 1.0
return MockLM
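# --- Hedged usage sketch (editor addition): `mock_lm` returns a Distribution
# *class*; instantiate it with a prompt before sampling.
def _example_mock_lm():
  lm = mock_lm('hello world')
  sample = lm(prompt='Say hi:').sample(rng=None)
  assert sample.value == 'hello world'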
# TODO(ddohan): Use cascade machinery + yields here as well
def iterate_until(rng,
prompt: Text,
lm,
k: int = 2,
until: Text = '\n',
reject_if_missing=True,
timeout=60,
include_until=False,
rescore=True):
"""Sample from language model up to `k` times.
Stop at given `until` token.
Args:
rng: Random seed.
prompt: Prefix text to condition on.
lm: Language model distribution to use.
k: Number of times to iterate the sampling.
until: Sample until this token is found.
reject_if_missing: If True, then reject the trace if `until` is not found.
timeout: Maximum time to wait for a generation.
include_until: If True, include the `until` token in the returned
generation.
rescore: If true, then rescores the generation after truncation, as an
`Observe`
Returns:
None if rejected, otherwise the generated text.
"""
full_generation = ''
likelihood = 0.0
for _ in range(k):
current_prompt = prompt + full_generation
step_lm = lm(prompt=current_prompt)
generation = step_lm.sample(rng=rng)
if isinstance(generation, futures.Future):
generation = generation.result(timeout)
full_generation += generation.value
likelihood += generation.log_p
if until:
clipped = removeafter(text=full_generation, token=until)
if clipped is not None:
if rescore:
# print(f"rescore: `{clipped}`")
score = lm(prompt=prompt).score(clipped)
if isinstance(score, futures.Future):
score = score.result(timeout)
else:
score = 0.0
# preclip = clipped
clipped = clipped[:-len(until)]
if include_until:
clipped += until
# print(f'`{preclip}` -> `{clipped}`')
return dists.RandomSample(log_p=score, value=clipped)
if until and reject_if_missing:
# We were looking for something and we didn't find it.
# Use a -inf likelihood
likelihood = -math.inf
return dists.RandomSample(log_p=likelihood, value=full_generation)
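# --- Hedged usage sketch (editor addition): with a mock LM that emits 'AB\n'
# on each call, sampling stops at the first newline and the `until` token is
# stripped from the returned value (rescoring disabled for simplicity).
def _example_iterate_until():
  sample = iterate_until(
      rng=None, prompt='Q:', lm=mock_lm('AB\n'), k=3, until='\n',
      rescore=False)
  assert sample.value == 'AB'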
@dataclasses.dataclass(eq=True, frozen=True)
class String(dists.Distribution):
"""Sample a String from an LM. May iterate calls to the LM multiple times."""
prompt: Text = ''
k: int = 2
until: Text = '\n'
include_until: bool = False
lm: Any = None
timeout: int = 60
reject_if_missing: bool = True
def _get_lm(self):
return self.lm or get_default_lm()
def sample(self, rng):
rng = dists.get_rng(rng)
value = iterate_until(
rng=rng,
lm=self._get_lm(),
prompt=self.prompt,
reject_if_missing=self.reject_if_missing,
until=self.until,
k=self.k,
timeout=self.timeout,
include_until=self.include_until)
return value
def log_prob(self, value):
if not isinstance(value, str):
value = f' {value}'
if self.until:
value = value + self.until
return self._get_lm()(prompt=self.prompt).score(sample=value)
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sampling from OpenAI api."""
import bisect
import dataclasses
import functools
import os
from typing import Iterable, Optional, Text
import uuid
from cascades._src.distributions import base as dists
import jax
import openai
api_key = os.getenv('OPENAI_API_KEY')
if api_key:
openai.api_key = api_key
# TODO(ddohan): Persist cache to disk
@functools.lru_cache()
def cached_completion(rng=None, **kwargs):
del rng
return openai.Completion.create(**kwargs)
@dataclasses.dataclass(eq=True, frozen=True)
class GPT(dists.Distribution):
"""Sample a String from GPT."""
prompt: Text = ''
stop: Optional[Iterable[Text]] = ('\n',)
engine: str = 'davinci-codex'
temperature: float = 0.7
max_tokens: int = 128
top_p: float = .95
frequency_penalty: int = 0
presence_penalty: int = 0
def sample(self, rng=None, raw=False):
"""Sample a value from the distribution given rng key.
Args:
rng: Optional random key.
raw: If True, return the raw OpenAI API response alongside the logprob.
Returns:
  A RandomSample or, if raw is True, a (logprob, response) tuple.
"""
if rng is None:
rng = uuid.uuid4().hex
elif isinstance(rng, jax.Array):
rng = jax.random.randint(rng, (), 0, 1_000_000_000)
rng = int(rng)
elif not isinstance(rng, int):
raise ValueError(f'RNG must be an integer or Jax key. Was {rng}')
result = cached_completion(
rng=rng,
model=self.engine,
prompt=self.prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
logprobs=0,
stop=self.stop,
echo=False,
)['choices'][0]
completion = result['text']
logprobs = result['logprobs']
span = (0, len(completion) + 1)
start = 0
end = bisect.bisect_left(logprobs['text_offset'], span[1])
total_logprob = sum(result['logprobs']['token_logprobs'][start:end])
if raw:
return (total_logprob, result)
return dists.RandomSample(log_p=total_logprob, value=completion)
def log_prob(self, value, raw=False):
"""Get log prob of completion.
Args:
value: Completion to score.
raw: If True, return the raw OpenAI API response alongside the logprob.
Returns:
  A float logprob or, if raw is True, a (logprob, response) tuple.
"""
text = self.prompt + value
span = (len(self.prompt), len(text))
result = cached_completion(
rng=0,
model=self.engine,
prompt=text,
temperature=self.temperature,
max_tokens=0,
echo=True,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
logprobs=0,
)['choices'][0]
logprobs = result['logprobs']
start = bisect.bisect(logprobs['text_offset'], span[0]) - 1
end = bisect.bisect_left(logprobs['text_offset'], span[1])
excerpt = text[span[0]:span[1]]
joined = ''.join(logprobs['tokens'][start:end])
assert excerpt in joined
total_logprob = sum(logprobs['token_logprobs'][start:end])
if raw:
return (total_logprob, result)
return total_logprob
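# --- Hedged usage sketch (editor addition): requires OPENAI_API_KEY and a
# model served by the legacy Completion API used above; the prompt and
# parameters here are illustrative only.
def _example_gpt():
  dist = GPT(prompt='Q: 2 + 2 =\nA:', stop=('\n',), max_tokens=8)
  sample = dist.sample(rng=0)  # Cached on (rng, request kwargs).
  return sample.value, sample.log_p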
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scratchpad/chain of thought, demonstrated on gsm8k.
Scratchpad: https://arxiv.org/abs/2112.00114
Chain of thought: https://arxiv.org/abs/2201.11903
Self consistency (https://arxiv.org/abs/2203.11171) is done by marginalizing out
the `thought` in the `infer` operator.
"""
import dataclasses
import re
from typing import Dict, Iterable, Tuple, Optional, Text, List
import cascades as cc
from cascades.examples.tasks import gsm8k
# Standard chain of thought arithmetic prompts.
# From https://arxiv.org/abs/2112.00114
PROMPTS = """Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?
A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.
Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?
A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.
Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?
A: Leah had 32 chocolates and Leah’s sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.
Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?
A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.
Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?
A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.
Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?
A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.
Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?
A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.
Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?
A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.
"""
@dataclasses.dataclass
class ReasonIO:
"""Represents a question and answer, together with reasoning."""
question: Text
reason: Optional[Text] = None
answer: Optional[Text] = None
id: Optional[Text] = dataclasses.field(default=None)
# From https://arxiv.org/abs/2203.11171
def _process_part(qa: Text) -> ReasonIO:
question, reason = qa.split('A:')
question = question.replace('Q:', '')
reason, answer = reason.split('The answer is')
answer = answer.replace('.', '')
return ReasonIO(
question=question.strip(), reason=reason.strip(), answer=answer.strip())
def load_chain_examples() -> Tuple[ReasonIO, ...]:
"""Load the standard chain of thought prompts used in the paper."""
parts = PROMPTS.split('Q:')[1:]
chain_prompts = tuple(_process_part(x) for x in parts)
return chain_prompts
def load_gsm8k(base_dir=gsm8k.GSM8K_PATH):
"""Load and process the gsm8k dataset."""
splits = gsm8k.load_dataset(base_dir=base_dir)
train = tuple(map(process_gsm8k_example, splits['train']))
test = tuple(map(process_gsm8k_example, splits['test']))
return dict(train=train, test=test)
def process_gsm8k_example(x: Dict[Text, Text]) -> ReasonIO:
"""Convert gsm8k dicts into ReasonIO."""
return ReasonIO(
question=x['question'],
reason=x['answer'].split('####')[0].strip(),
answer=x['final_answer'],
id=x['id'])
def fewshot_prompt(examples: Iterable[ReasonIO],
target: Optional[ReasonIO] = None):
"""Construct a few shot prompt."""
parts = []
for x in examples:
if x.reason is None:
raise ValueError('Must provide reason for few shot.')
if x.answer is None:
raise ValueError('Must provide answer for few shot.')
part = f'Question: {x.question}\nReason: {x.reason}\nAnswer: {x.answer}'
parts.append(part)
if target:
part = f'Question: {target.question}\nReason'
parts.append(part)
else:
parts.append('')
prompt = '\n===\n'.join(parts).strip()
return prompt
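# --- Hedged usage sketch (editor addition): one worked example plus the
# target question, joined by `===` separators; the prompt ends with 'Reason'
# so the LM continues the target's reasoning.
def _example_fewshot_prompt():
  shot = ReasonIO(question='1 + 1?', reason='One plus one is 2.', answer='2')
  target = ReasonIO(question='2 + 2?')
  prompt = fewshot_prompt(examples=[shot], target=target)
  # prompt == ('Question: 1 + 1?\nReason: One plus one is 2.\nAnswer: 2'
  #            '\n===\nQuestion: 2 + 2?\nReason')
  return prompt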
@cc.model
def sample_with_prompts(target: ReasonIO,
examples: List[ReasonIO],
n_prompts: Optional[int] = None,
lm=None,
max_calls=4):
"""Sample an answer for target given prompts.
Args:
target: Target task.
examples: Prompting tasks.
n_prompts: If None, use all examples in prompt. Otherwise, select this many
examples to place in prompt.
lm: Language model to use.
max_calls: Max # of times to iterate the LM sampling.
Yields:
Cascade distributions (all LM sample nodes in this case).
Returns:
predicted answer string
"""
# Select a random subset and ordering of the prompts.
yield cc.log(value=target.id, name='problem_id')
if n_prompts:
chosen_examples = yield cc.sample(
cc.Choose(k=n_prompts, options=examples), name='choose_prompts')
else:
chosen_examples = examples
# Create few shot prompt
prompt = fewshot_prompt(examples=chosen_examples, target=target)
# Sample until we hit the end of example marker (`===`), then extract
# the answer as rightmost digits.
prediction = yield cc.sample(
cc.String(prompt=prompt, lm=lm, k=max_calls, until='==='), name='thought')
# Find right most number.
nums = re.findall(r'\d+', prediction)
if nums:
answer = nums[-1]
else:
yield cc.reject(reason=f'No answer found in `{prediction}`')
return answer # pytype: disable=name-error # py310-upgrade
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tests for Scratchpads."""
from absl.testing import absltest
import cascades as cc
from cascades.examples import scratchpad
class ScratchpadTest(absltest.TestCase):
def test_sample_solution(self):
examples = scratchpad.load_chain_examples()
target = scratchpad.ReasonIO(
question='Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?',
reason=None,
answer='72',
id='test/123')
mock_lm = cc.mock_lm(
response=': Half of 48 is 24. 24 + 48 is 72.\nAnswer: 72\n===')
model = scratchpad.sample_with_prompts(
lm=mock_lm,
target=target,
examples=examples,
n_prompts=3)
trace = model.sample(seed=0)
self.assertEqual('72', trace.return_value)
self.assertEqual('test/123', trace['problem_id'].value)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reasoning with a calculator."""
import ast
import operator as op
# https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string
# supported operators
operators = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Pow: op.pow,
ast.BitXor: op.xor,
ast.USub: op.neg
}
def eval_arithmetic(expr):
"""Eval an arithmetic expression.
>>> eval_expr('2^6')
4
>>> eval_expr('2**6')
64
>>> eval_expr('1 + 2*3**(4^5) / (6 + -7)')
-5.0
Args:
expr: Expression to evaluate
Returns:
Result of evaluating expression.
"""
return _eval_arithmetic(ast.parse(expr, mode='eval').body)
def _eval_arithmetic(node):
if isinstance(node, ast.Num): # <number>
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](_eval_arithmetic(node.left),
_eval_arithmetic(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](_eval_arithmetic(node.operand))
else:
raise TypeError(node)
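# --- Hedged usage sketch (editor addition): only the operators in the table
# above are allowed, so names and function calls raise TypeError. This is
# what makes the evaluator safe to run on model-generated text.
def _example_eval_arithmetic():
  assert eval_arithmetic('(3 + 5) * 2') == 16
  try:
    eval_arithmetic('__import__("os")')
  except TypeError:
    pass  # Function calls are rejected, as intended.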
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GSM8k reasoning task."""
import json
import os
open_file = open
GSM8K_PATH = None
def _load_line(line):
example = json.loads(line)
answer = int(example['answer'].split('#### ')[-1].replace(',', ''))
example['final_answer'] = answer
return example
def load_dataset(base_dir=GSM8K_PATH):
"""Load GSM8k raw data from `base_dir`.
Available at:
https://github.com/openai/grade-school-math
Args:
base_dir: Directory containing `train.jsonl` and `test.jsonl`.
Returns:
A dictionary of (train, test).
"""
train_path = os.path.join(base_dir, 'train.jsonl')
test_path = os.path.join(base_dir, 'test.jsonl')
train_xs = []
test_xs = []
with open_file(train_path) as f:
for i, line in enumerate(f):
example = _load_line(line)
example['uid'] = f'train/{i}'
train_xs.append(example)
with open_file(test_path) as f:
for i, line in enumerate(f):
example = _load_line(line)
example['uid'] = f'test/{i}'
test_xs.append(example)
return dict(train=train_xs, test=test_xs)
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constitutional RL dataset."""
import gzip
import json
import os
open_file = gzip.open
DATA_PATH = None
VARIANTS = [
'helpful-base', 'helpful-online', 'harmless-base',
'helpful-rejection-sampled', 'red-team-attempts'
]
def read_gz_jsonl(path):
"""Read a compressed jsonl.gz file.
Args:
path: Path to load
Returns:
Tuple of (loaded dicts, lines which gave decode errors.)
"""
with open_file(path, 'rb') as f:
print(path)
if path.endswith('.gz'):
f = gzip.GzipFile(fileobj=f)
xs = []
errors = []
for _, line in enumerate(f):
line = line.decode()
try:
example = json.loads(line)
xs.append(example)
except json.JSONDecodeError:
errors.append(line)
return xs, errors
def load_dataset(variant='helpful-base', base_dir=DATA_PATH):
"""Load Constitutional RL data.
Available at:
https://github.com/anthropics/hh-rlhf
Args:
variant: Dataset subset to load.
base_dir: Directory containing `train.jsonl` and `test.jsonl`.
Returns:
A dictionary of (train, test).
"""
base_dir = os.path.join(base_dir, variant)
if variant == 'red-team-attempts':
train_path = os.path.join(base_dir, 'red_team_attempts.jsonl.gz')
train_xs, _ = read_gz_jsonl(train_path)
return dict(train=train_xs)
else:
train_path = os.path.join(base_dir, 'train.jsonl.gz')
test_path = os.path.join(base_dir, 'test.jsonl.gz')
train_xs, _ = read_gz_jsonl(train_path)
test_xs, _ = read_gz_jsonl(test_path)
return dict(train=train_xs, test=test_xs)
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loaders for bbh dataset: https://arxiv.org/abs/2210.09261."""
import json
import os
open_file = open
list_dir = os.listdir
bbh_path = None
cot_prompts_path = None
def get_task_names():
files = list_dir(bbh_path)
task_names = [f.split('.json')[0] for f in files]
task_names = [f for f in task_names if '.' not in f]
return task_names
def get_cot_prompt(task_name: str):
if task_name not in get_task_names():
raise ValueError(
f'Task {task_name} not a valid bbh task. Consult `get_task_names()` for a list of valid tasks.'
)
prompt_loc = f'{cot_prompts_path}/{task_name}.txt'
with open_file(prompt_loc, 'r') as f:
data = ''.join(f.readlines())
return data
def load_dataset(task_name: str,
base_dir: str = bbh_path,
qa_format: bool = True):
"""Load MATH raw data from disk.
Available at:
https://github.com/suzgunmirac/BIG-Bench-Hard
Args:
task_name: Which bbh task to load
base_dir: Directory containing json files for bbh.
qa_format: whether to prepend Q: and A: to example `input` and `target`
Returns:
A list of examples.
Each example is a dict with keys:
'input': (str) problem statement
'target': (str) the solution
"""
if task_name not in get_task_names():
raise ValueError(
f'Task {task_name} not a valid bbh task. Consult `get_task_names()` for a list of valid tasks.'
)
task_loc = f'{base_dir}/{task_name}.json'
with open_file(task_loc, 'r') as f:
data = json.loads(f.readlines()[0])['examples']
if qa_format:
formatted_examples = []
for d in data:
# uses BIG-bench formatting
formatted_examples.append({
'input': f"Q: {d['input']}",
'target': f"A: {d['target']}"
})
data = formatted_examples
return data
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SVAMP word problem dataset."""
import json
import random
import re
from typing import Text, Optional, Union
import cascades as cc
from cascades.examples import calculator
open_file = open
SVAMP_PATH = None
def load_svamp(seed=0, valid_size=50, test_size=50, json_path=SVAMP_PATH):
"""Load SVAMP dataset from json.
Available at:
https://raw.githubusercontent.com/arkilpatel/SVAMP/main/SVAMP.json
Args:
seed: Seed for shuffling.
valid_size: Number of examples in validation set.
test_size: Number of examples in test set.
json_path: Json file path to load.
Returns:
A dictionary of (train, valid, test).
"""
if not json_path:
raise ValueError('json_path for load_svamp must be specified.')
with open_file(json_path) as f:
dataset = json.loads(f.read())
random.Random(seed).shuffle(dataset)
return dict(
valid=dataset[:valid_size],
test=dataset[valid_size:valid_size + test_size],
train=dataset[valid_size + test_size:])
def _evaluate_equation(text: Text) -> Optional[float]:
try:
output = calculator.eval_arithmetic(text)
return output
except (AttributeError, SyntaxError, ZeroDivisionError) as e:
del e
return None
def _score_prediction(model_answer: Union[float, int, Text],
true_answer: Union[float, int, Text]):
model_answer = str(model_answer)
true_answer = str(true_answer)
if true_answer == model_answer:
return True
else:
return False
def _svamp_format_task(task):
return '{body} : {question}'.format(
body=task['Body'], question=task['Question'])
def _text_extract_regex(text: Text, pattern: Text) -> Optional[Text]:
p = re.compile(pattern)
matches = p.findall(text)
if not matches:
return None
matched_text = matches[0]
return matched_text
def _text_extract_brackets(text: Text) -> Optional[Text]:
"""Extract text within {brackets}."""
return _text_extract_regex(text, pattern=r'{{(.*?)}}')
def _svamp_format_prompt(
task,
examples,
instructions='Write the arithmetic expression to solve each word problem:'):
"""Create prompt based on svamp task."""
formatted_io_examples = [(_svamp_format_task(task),
'{{ %s }}' % task['Equation']) for task in examples]
parts = [instructions]
for question, answer in formatted_io_examples:
parts.append('QUESTION: %s\nANSWER: %s' % (question, answer))
parts.append('QUESTION: %s\nANSWER:' % _svamp_format_task(task))
return '\n\n'.join(parts)
def solve_svamp_task(task, train_set, lm=None, check_correct=False):
"""Generate solution questions to the SVAMP task, conditioned on train_set."""
# TODO(ddohan): Make distribution over conditioning questions a part of model.
prompt = _svamp_format_prompt(task=task, examples=train_set)
# Generate equations
line = yield cc.sample(
cc.String(prompt=prompt, lm=lm, until='\n'), name='gen_equation')
# Clip to 1 line and rescore
# line = line.strip().split('\n')[0]
# TODO(ddohan): Should this go in the prior p(x) instead of likelihood p(y|x)
# yield cc.observe(obs=line, dist=lm(prompt=prompt), name='clip_and_rescore')
# Extract equations between {{ }}
eq = _text_extract_brackets(line)
if eq is None:
yield cc.reject(reason='No equation proposed', name='no_equation')
return
eq = eq.strip() # pylint: disable=attribute-error
eq = eq.replace('{', '').replace('}', '')
# Evaluate the arithmetic using a calculator
calculator_output = _evaluate_equation(eq)
if calculator_output is None:
  yield cc.reject(reason=f'Calculator failed: `{eq}`')
  return
if check_correct:
# Did we get the right answer?
correct = _score_prediction(model_answer=calculator_output,
true_answer=task['Answer'])
yield cc.log(correct, 'is_correct')
if not correct:
yield cc.reject(
reason=f'Incorrect answer: expected {task["Answer"]} != {eq} from {calculator_output}',
name='incorrect')
return (eq, calculator_output)
# Maximize p(score | task) marginalized over solutions
# params = set of prompts we use in the prefix.
def svamp_optimize_prompt(lm, task, train_set, k=5):
"""Choose k tasks to put into prompt from training examples."""
idxs = yield cc.sample(dist=cc.Choose(k=k, options=range(len(train_set)))) # pytype: disable=wrong-arg-types
prompt_tasks = [train_set[i] for i in idxs]
result = yield solve_svamp_task(lm=lm, task=task, train_set=prompt_tasks)
return result
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loaders for MATH dataset."""
import functools
import json
open_file = open
MATH_PATH = None
def get_category(math_ds,
category='Prealgebra',
split='train',
length_sorted=False):
tasks = [x for x in math_ds[split] if x['type'] == category]
if length_sorted:
tasks = sorted(tasks, key=lambda x: len(x['problem']) + len(x['solution']))
return tasks
@functools.lru_cache()
def load_dataset(base_dir=MATH_PATH):
"""Load MATH raw data from disk.
Available at:
https://github.com/hendrycks/math/
Args:
base_dir: Path to the preprocessed MATH json file (a single json blob, opened directly).
Returns:
A dictionary of (train, test).
Each example is a dict with keys:
'index': (int) example number
'level': (int) difficulty
'problem': (str) problem statement
'solution': (str) an example solution
"""
with open_file(base_dir, 'r') as f:
data = json.loads(f.readlines()[0])
return data
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to execute code for evaluation across multiple languages."""
import collections
import json
import os
import pathlib
import random
import shutil
from typing import Optional
import gin
import tensorflow as tf
from absl import app
from absl import flags
from absl import logging
from babelcode import QUESTION_DATA_KEYS
from babelcode import execute_bc_predictions
from babelcode import load_progress_from_dir
from babelcode import utils
from babelcode.languages import LanguageRegistry
FLAGS = flags.FLAGS
_GIN = flags.DEFINE_string('gin_file', None, 'Gin configuration file.')
_EXP_NAME = flags.DEFINE_string('experiment_name', None,
'Name of the experiment.')
_OUTPUT_PATH = flags.DEFINE_string('output_path', None, 'Output path.')
_PRED_PATH = flags.DEFINE_string('predictions', None, 'Prediction path.')
_TEST_CODE_PATH = flags.DEFINE_string('test_code', None,
'pathlib.Path to testing code')
_OVERWRITE = flags.DEFINE_bool('overwrite', False,
'Overwrite output file if it exists')
_LANGUAGES = flags.DEFINE_list('languages', None,
'Comma separated list of languages to run')
_DEBUG = flags.DEFINE_bool('debug', False, 'Enable Debug mode')
_NUM_CORES = flags.DEFINE_integer('cpu_count',
None,
help='Number of CPUs to use.')
_DEBUG_NUM_PREDS = flags.DEFINE_integer(
'debug_num_preds', -1, help='Debugging number of predictions to use.')
_PREDS_PER_QUESTION = flags.DEFINE_integer(
'samples', None, 'Number of predictions per question.')
_STEP = flags.DEFINE_integer('step', 0, 'Step to use for tensorboard.')
_FORCE_QUESTION_ENTRY = flags.DEFINE_bool(
'use_question_entry',
False,
'Force using the question entry points instead of the prediction ones.',
)
_VALIDATION_MODE = flags.DEFINE_bool('validation', False,
'Enable validation printing.')
_DEBUG_DIR_PATH = flags.DEFINE_string('debug_dir', None,
                                      'Debugging dir to save code to.')
_ALLOW_EXECUTION = flags.DEFINE_bool('allow_execution', False,
                                     'Allow execution.')
@gin.configurable(
'general',
denylist=[
'experiment_name', 'prediction_path', 'output_path', 'test_code_path',
'overwrite', 'eval_languages', 'debug_code_gen_dir'
],
)
def evaluate_predictions_from_file(
experiment_name: str,
prediction_path: pathlib.Path,
output_path: pathlib.Path,
test_code_path: Optional[pathlib.Path],
overwrite: bool,
eval_languages: Optional[str] = None,
debug: bool = False,
debug_code_gen_dir: Optional[str] = None,
debug_num_preds: int = -1,
seed: int = 1,
validation_mode: bool = False,
):
"""Evaluates a set of predictions.
Takes a set of prediction files and executes them against test cases.
Results are saved to a `results.json` file in the `output_path`.
Args:
experiment_name: The name of the experiment.
prediction_path: The path to the predictions.
output_path: The output path.
test_code_path: The path to the testing code.
overwrite: Overwrite the output directory if found.
eval_languages: The languages to restrict evaluation to.
debug: Enable debugging.
debug_code_gen_dir: Save the generated testing code to a local directory
instead of a temporary one.
debug_num_preds: Only debug a specific number of predictions.
seed: The seed to use.
validation_mode: Enable validation mode where metrics are not reported, but
the predictions that had an error or timed out are.
"""
debug = debug_num_preds > 0 or debug
if debug:
experiment_name = f'{experiment_name}.DEBUG'
print('Debug is enabled.')
output_path = output_path.joinpath(experiment_name)
must_load_progress = False
if output_path.exists():
if overwrite:
shutil.rmtree(output_path)
else:
must_load_progress = True
output_path.mkdir(exist_ok=True, parents=True)
utils.setup_logging('logs', debug, log_path=output_path)
logging.info('Evaluating predictions from %s', prediction_path)
logging.info('Saving to %s', output_path)
logging.info('Reading tests from %s', test_code_path)
summary_writer = tf.summary.create_file_writer(str(output_path),
flush_millis=120)
existing_results = {}
if must_load_progress:
# If a language was stopped mid-evaluation, it will still have its
# temporary path, so check that first.
logging.info('Getting executed results from %s', output_path)
existing_results = load_progress_from_dir(output_path)
else:
logging.info('No prior results found')
# Allow the use of multiple language predictions in a single file by using
# the language in the key during reading.
all_predictions = list(map(json.loads, prediction_path.open()))
logging.info('Found %d total predictions', len(all_predictions))
# Separate the predictions into their own language buckets.
preds_by_lang = collections.defaultdict(dict)
question_id_counter = collections.Counter()
logging.debug('Grouping predictions by language')
for prediction in all_predictions:
language = prediction[f"language"]
pid = question_id_counter[f"{prediction['language']}/{prediction['qid']}"]
prediction['id'] = pid
preds_by_lang[language][f"{prediction['qid']}/{pid}"] = prediction
question_id_counter[f"{prediction['language']}/{prediction['qid']}"] += 1
langs_found = list(sorted(preds_by_lang))
logging.info('%d language(s) found', len(langs_found))
logging.info('# Preds by Language:')
for lang in langs_found:
msg = f'{lang:>10} = {len(preds_by_lang[lang])}'
logging.info(msg)
if eval_languages:
if any(l not in langs_found for l in eval_languages):
raise ValueError(f'{eval_languages=} not found in predictions')
logging.warning('Only evaluating %s', eval_languages)
langs_found = eval_languages
# Save the gin config and the information on where the predictions and test
# code are located to two files in the output directory.
logging.info('Saving launch information...')
with output_path.joinpath('config.gin').open('w') as f:
f.write(gin.config_str())
with output_path.joinpath('launch_info.json').open('w') as f:
json.dump(
{
'prediction_path': str(prediction_path),
'test_code_path': str(test_code_path),
},
f,
indent=True,
)
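  # The resulting launch_info.json is a small provenance record, e.g. (paths
  # shown here are placeholders):
  #   {"prediction_path": "/path/to/predictions.jsonl",
  #    "test_code_path": "/path/to/test_code"}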
question_mapping = collections.defaultdict(dict)
found = 0
if test_code_path:
logging.info('Reading questions from %s', test_code_path)
for line in map(json.loads,
test_code_path.joinpath('testing_code.jsonl').open()):
question_mapping[line['language']][str(line['qid'])] = line
found += 1
else:
logging.info('Making question mapping from predictions found.')
for lang, preds in preds_by_lang.items():
for pid, pred in preds.items():
qid, _ = pid.split('/')
question_mapping[lang][qid] = {k: pred[k] for k in QUESTION_DATA_KEYS}
      found += len(question_mapping[lang])
logging.info('Found %d questions across %d languages', found,
len(question_mapping))
all_metrics = {}
all_pred_metrics = []
all_error_per_lang = {}
for lang_name in langs_found:
lang = LanguageRegistry.get_language(lang_name)
if debug_num_preds > 0 and len(preds_by_lang[lang_name]) > debug_num_preds:
logging.warning('ONLY USING %d PREDICTIONS', debug_num_preds)
# Set the seed for debugging a subset.
utils.set_seed(seed)
to_keep = random.sample(list(preds_by_lang[lang_name]), debug_num_preds)
preds_to_use = {
k: v for k, v in preds_by_lang[lang_name].items() if k in to_keep
}
else:
preds_to_use = preds_by_lang[lang_name]
metrics, pred_results = execute_bc_predictions(
lang=lang,
question_mapping=question_mapping[lang_name],
raw_predictions=preds_to_use,
output_path=output_path,
debug_dir_path=debug_code_gen_dir, # type: ignore
seed=seed,
step=_STEP.value,
executed_predictions=existing_results.get(lang_name, {}),
summary_writer=summary_writer,
force_question_entry=_FORCE_QUESTION_ENTRY.value,
) # type:ignore
# Add the number of questions with all predictions having error.
with_all_error = []
with_all_timed_out = []
num_questions = 0
for k, metric_dict in metrics.items():
# Skip the overview metrics.
if 'metrics' in k:
continue
num_preds = metric_dict['num_predictions']
qid = k.split('/')[-1]
title = question_mapping[lang_name][qid]['title']
if num_preds == metric_dict['Had Error']:
with_all_error.append(f'{title} ({qid=})')
elif num_preds == metric_dict['Timed Out']:
with_all_timed_out.append(f'{title} ({qid=})')
num_questions += 1
all_error_per_lang[lang_name] = (
with_all_error,
with_all_timed_out,
num_questions,
)
all_metrics.update(metrics)
all_pred_metrics.extend(pred_results)
metric_path = output_path.joinpath('metrics.json')
logging.info('Saving metrics to %s', metric_path)
with metric_path.open('w') as f:
json.dump(all_metrics, f)
pred_result_path = output_path.joinpath('pred_results.jsonl')
logging.info('Saving all prediction results to %s', pred_result_path)
with pred_result_path.open('w') as f:
for l in all_pred_metrics:
f.write(json.dumps(l.to_dict()) + '\n')
if validation_mode:
logging.info(
'Number of Questions With Issues For %d languages:',
len(all_error_per_lang),
)
for lang_name, (
with_error,
with_timeout,
num_questions,
) in all_error_per_lang.items():
msg = (f'{lang_name:>16} ='
f' {len(with_error)+len(with_timeout)}/{num_questions}')
logging.info(msg)
logging.info('%s\tWith Error=%s', ' ' * 16, with_error)
logging.info('%s\tWith Timeout=%s', ' ' * 16, with_timeout)
else:
logging.info('Metrics:')
for lang_name in langs_found:
logging.info('\t%s:', lang_name)
metrics = all_metrics[f'{lang_name}/metrics']
to_print = ['questions_passed']
for k in metrics:
if 'subsampling_pass' in k or 'estimate' in k:
to_print.append(k)
for k in sorted(to_print):
value = metrics[k]
key_str = f'{k:>32}'
if isinstance(value, float):
value_str = f'{value:.3f}'
elif isinstance(value, (dict, list, tuple)):
continue
else:
value_str = f'{value}'
logging.info('\t%s = %s', key_str, value_str)
if __name__ == '__main__':
def eval_preds_main(_):
"""Main entry point to the launch."""
FLAGS['alsologtostderr'].value = True
# Create gin bindings to overwrite the config.
bindings = []
if _DEBUG.value:
bindings.append('general.debug=True')
if _NUM_CORES.value:
bindings.append(f'execution.num_workers={_NUM_CORES.value}')
if _DEBUG_NUM_PREDS.value > 0:
bindings.append(f'general.debug_num_preds={_DEBUG_NUM_PREDS.value}')
if _PREDS_PER_QUESTION.value is not None and _PREDS_PER_QUESTION.value > 0:
bindings.append(
f'metrics.num_preds_per_question={_PREDS_PER_QUESTION.value}')
print(f'gin_{bindings=}')
gin_path = pathlib.Path(_GIN.value).resolve()
    print(f'Gin path={gin_path}')
gin.parse_config_file(str(gin_path))
gin.parse_config(bindings=bindings)
test_code_path = None
if _TEST_CODE_PATH.value:
test_code_path = pathlib.Path(_TEST_CODE_PATH.value).resolve()
if _ALLOW_EXECUTION.value:
os.environ['ALLOW_EXECUTION'] = 'true'
debug_dir = _DEBUG_DIR_PATH.value
if debug_dir:
debug_dir = pathlib.Path(debug_dir)
evaluate_predictions_from_file(
experiment_name=_EXP_NAME.value,
prediction_path=pathlib.Path(_PRED_PATH.value).resolve(),
output_path=pathlib.Path(_OUTPUT_PATH.value).resolve(),
test_code_path=test_code_path,
overwrite=_OVERWRITE.value,
validation_mode=_VALIDATION_MODE.value,
eval_languages=_LANGUAGES.value,
debug_code_gen_dir=debug_dir)
flags.mark_flags_as_required([
_GIN.name,
_EXP_NAME.name,
_OUTPUT_PATH.name,
_PRED_PATH.name,
])
app.run(eval_preds_main)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to generate testing code for a given set of problems."""
import json
import pathlib
import shutil
from typing import Optional
import gin
from absl import app
from absl import flags
from absl import logging
from babelcode import generate_code_for_questions
from babelcode.data_types.question import read_input_questions
from babelcode.languages import LanguageRegistry
from babelcode.utils import setup_logging
def generate_problem_code_main(
gin_path: str,
input_path: pathlib.Path,
output_path: pathlib.Path,
debug_lang: Optional[str],
debug: bool,
):
"""Generates the question code for a dataset."""
setup_logging('generate_tests', debug)
logging.info('Generating tests')
logging.info('Loading gin file from %s', gin_path)
gin.parse_config_file(str(gin_path))
logging.info('Reading from %s', input_path)
logging.info('Saving all generated tests to %s', output_path)
output_path.mkdir(parents=True, exist_ok=True)
failures_path = output_path.joinpath('failures')
  shutil.rmtree(failures_path, ignore_errors=True)
failures_path.mkdir()
questions, failed = read_input_questions(input_path=input_path)
logging.info('Found %d questions', len(questions))
if failed:
    logging.error('%d questions failed to parse.', len(failed))
with failures_path.joinpath('read_failed.txt').open('w') as f:
for line, reason in failed:
f.write(f'{reason}: {json.dumps(line)}\n')
langs_to_use = LanguageRegistry.list_languages()
if debug_lang:
langs_to_use = [debug_lang]
all_questions = []
all_prompts = []
logging.info('%s total language(s) to generate tests for', len(langs_to_use))
for lang_name in langs_to_use:
lang = LanguageRegistry.get_language(lang_name)
parsed, failed = generate_code_for_questions(questions=questions, lang=lang)
for q, p in parsed:
all_questions.append({'language': lang_name, **q})
all_prompts.append({'language': lang_name, **p})
with failures_path.joinpath(f'{lang_name}_failed.jsonl').open('w') as f:
for question, reason in failed:
f.write(
json.dumps({
'qid': question.qid,
'reason': str(reason),
'error': type(reason).__name__,
'question': question.to_dict(),
}) + '\n')
with output_path.joinpath('testing_code.jsonl').open('w') as f:
logging.info('Saving questions to %s',
output_path.joinpath('testing_code.jsonl'))
for p in all_questions:
f.write(json.dumps(p) + '\n')
with output_path.joinpath('prompt_info.jsonl').open('w') as f:
logging.info('Saving prompt info to %s',
output_path.joinpath('prompt_info.jsonl'))
for p in all_prompts:
f.write(json.dumps(p) + '\n')
if __name__ == '__main__':
FLAGS = flags.FLAGS
_GIN_FILE = flags.DEFINE_string('gin_file', None, help='Path to gin file.')
_INPUT = flags.DEFINE_string('input',
None,
                               help='Path to input problems.')
  _OUTPUT = flags.DEFINE_string('output', None, help='Path to output.')
_LANG = flags.DEFINE_string('debug_lang',
None,
help='Debug a single language')
_DEBUG = flags.DEFINE_bool('debug', False, help='Debug')
def main(_):
FLAGS['alsologtostderr'].value = True
generate_problem_code_main(
_GIN_FILE.value,
pathlib.Path(_INPUT.value),
pathlib.Path(_OUTPUT.value),
_LANG.value,
_DEBUG.value,
)
flags.mark_flags_as_required([_GIN_FILE.name, _INPUT.name, _OUTPUT.name])
app.run(main)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
VERSION = "1.0.0"
setup(
name="babelcode",
version=VERSION,
description=
"A framework for execution-based evaluation of any dataset in any language.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
project_urls={
'Source': 'https://github.com/google-research/babelcode',
},
license='Apache License, Version 2.0',
author='Google Inc.',
classifiers=[
'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
packages=find_packages(where='.', include=['babelcode*', 'tests*']),
install_requires=[
'jinja2>=3.1.2',
'numpy>=1.23.1',
'pandas>=1.4.3',
'tqdm>=4.64.0',
'psutil>=5.9.2',
'absl-py>=1.2.0',
'tensorflow>=2.10.0',
'gin-config>=0.5.0',
],
)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to convert a python dataset to a parsed version.
Usage:
python convert_dataset.py --dataset_name="$DATASET_NAME" --input_path="$DATASET_LOCATION"
"""
# pylint: disable=logging-fstring-interpolation
import collections
import copy
import dataclasses
import json
import pathlib
from typing import Any, Dict, Optional
from absl import app
from absl import flags
from absl import logging
from babelcode import utils
from babelcode.dataset_conversion import POTENTIAL_ERROR_TYPES
from babelcode.dataset_conversion import parse_question_dict
from babelcode.languages import LanguageRegistry
Path = pathlib.Path
@dataclasses.dataclass
class RawQuestion:
"""Dataclass used to validate that the keys for each lin in the input file."""
id: str
title: str
testing_code: str
entry_fn_name: str
solution: str
text: Optional[str] = None
entry_cls_name: str = 'Solution'
metadata: Dict[str, Any] = dataclasses.field(default_factory=dict)
other_lang_solutions: Dict[str, str] = dataclasses.field(default_factory=dict)
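# A minimal input line that maps onto RawQuestion (illustrative values only):
#   {"id": "1", "title": "Add", "entry_fn_name": "add",
#    "solution": "def add(a, b):\n  return a + b",
#    "testing_code": "assert add(1, 2) == 3"}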
def convert_raw_questions(read_questions: Dict[str, Dict[str, Any]],
fixes: Dict[str, Dict[str, Any]]):
"""Convert a dictionary of raw questions into the parsed form.
Args:
read_questions: The map of question id to raw question object.
fixes: The question specific fixes to use.
Returns:
The map of question id to the parsed questions and the failures.
"""
# Make the raw question map
raw_question_map = {}
other_languages = set()
for qid, raw_question in read_questions.items():
fix = fixes.pop(qid, {})
raw_question.update(fix)
try:
raw_question_map[qid] = RawQuestion(**raw_question)
other_languages.update(list(raw_question_map[qid].other_lang_solutions))
    except TypeError as e:
raise ValueError(f'qid={qid} does not have correct keys.') from e
logging.info('Found additional solutions in %s', other_languages)
other_languages = {
k: LanguageRegistry.get_language(k) for k in other_languages
}
# Parse each question of the dataset
failed_to_parse = collections.defaultdict(list)
parsed_questions = {}
logging.info('Starting conversion...')
for qid, raw_question in raw_question_map.items():
try:
parsed_question = parse_question_dict(
qid=qid,
testing_code=raw_question.testing_code,
solution=raw_question.solution,
entry_fn_name=raw_question.entry_fn_name,
)
except POTENTIAL_ERROR_TYPES as e:
logging.warning(f'Question {qid} failed to parse with error {e}')
failed_to_parse[type(e).__name__].append((str(e), qid))
continue
parsed_question = {
'qid': raw_question.id,
'title': raw_question.title,
'entry_cls_name': raw_question.entry_cls_name,
'text': raw_question.text,
'metadata': raw_question.metadata,
**parsed_question,
}
old_entry_point = parsed_question['entry_fn_name']
new_entry_point = utils.format_str_with_convention(
utils.NamingConvention.SNAKE_CASE, parsed_question['entry_fn_name'])
solutions = {}
for lang, sol in raw_question.other_lang_solutions.items():
lang_name = utils.format_str_with_convention(
other_languages[lang].naming_convention, old_entry_point)
solutions[lang] = sol.replace(old_entry_point, lang_name)
solutions['Python'] = raw_question.solution.replace(old_entry_point,
new_entry_point)
parsed_question['solutions'] = solutions
parsed_question['entry_fn_name'] = new_entry_point
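    # For example, an entry point named 'maxProfit' becomes 'max_profit' for
    # the Python solution, while solutions in other languages get the name
    # reformatted to their own naming convention (sketch of the logic above).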
gold_predictions = []
for lang, sol in solutions.items():
gold_predictions.append({
'qid': raw_question.id,
'id': 0,
'code': sol,
'language': lang,
'testing_code': '',
'text': raw_question.text
})
parsed_questions[raw_question.id] = (parsed_question, gold_predictions)
return parsed_questions, failed_to_parse
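# Usage sketch (hypothetical input; mirrors how convert_dataset calls this):
#   parsed, failures = convert_raw_questions(read_questions, fixes={})
#   parsed['1'] -> (parsed_question_dict, gold_predictions_list)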
def convert_dataset(
dataset_name: str,
input_path: Path,
debug: bool,
disable_fixes: bool = False,
    debug_question: Optional[str] = None,
):
"""Converts a dataset to BabelCode format.
Args:
dataset_name: Name of the dataset.
input_path: The input path of the dataset.
debug: Enable debugging.
disable_fixes: Disable the use of fixes.
    debug_question: Debug a specific question id.
"""
print('Starting Dataset Conversion.')
output_path = utils.PROJECT_ROOT.joinpath('data')
utils.setup_logging(f'convert_{dataset_name}', debug=debug)
logging.info('Converting %s located at "%s"', dataset_name, input_path)
fixes = {}
if not disable_fixes:
fix_file_path = utils.PROJECT_ROOT.joinpath('data', 'dataset_fixes',
f'{dataset_name}.jsonl')
if fix_file_path.exists():
logging.info('Using fixes file located at %s', fix_file_path)
for line_number, line in enumerate(fix_file_path.open()):
try:
line = json.loads(line)
except json.JSONDecodeError as e:
          # json.JSONDecodeError alone does not say which line of the file
          # failed, so log the line number and its contents before re-raising.
          logging.error('Line %d had a JSON error', line_number)
          logging.error('line=%s', line)
raise e
qid = str(line.pop('id'))
fixes[qid] = line
else:
logging.warning('Fixes are enabled but no fixes found at %s',
fix_file_path)
else:
logging.info('No fixes passed')
logging.info('Reading questions')
raw_question_map = {}
for line_number, line in enumerate(map(json.loads, input_path.open())):
raw_question_map[line['id']] = line
if len(raw_question_map) % 50 == 0:
logging.info('Found %d questions', len(raw_question_map))
logging.info(
'%d raw questions found after reading %s',
len(raw_question_map),
input_path,
)
logging.info(
'%d fixes were unused.',
sum(1 for q in fixes if q not in raw_question_map),
)
if debug_question:
logging.warning('Only parsing debug_question=%s', debug_question)
raw_question_map = {debug_question: raw_question_map[debug_question]}
parsed_questions, failed_to_parse = convert_raw_questions(
raw_question_map, copy.deepcopy(fixes))
if failed_to_parse:
logging.warning(
f'{sum(map(len,failed_to_parse.values()))}/{len(raw_question_map)} failed'
' to parse.')
else:
logging.info('All questions parsed successfully')
failure_path = utils.PROJECT_ROOT.joinpath('data', 'convert_failures')
# shutil.rmtree(failure_path,ignore_errors=True)
failure_path.mkdir(parents=True, exist_ok=True)
logging.debug(f'Saving failures to {failure_path}')
logging.info('Failure Count by Type:')
with failure_path.joinpath(f'{dataset_name}.txt').open('w') as f:
for e_type, failures in failed_to_parse.items():
logging.info(f'{e_type:>24} = {len(failures)}')
for reason, qid in failures:
if qid in fixes:
logging.error(f'The fix for {qid} failed.')
f.write(f'Question {qid} => {e_type}: {reason}\n')
gold_path = output_path.joinpath('golden_predictions')
gold_path.mkdir(parents=True, exist_ok=True)
gold_path = gold_path.joinpath(f'{dataset_name}.jsonl')
output_path = output_path.joinpath('parsed_datasets')
output_path.mkdir(parents=True, exist_ok=True)
output_path = output_path.joinpath(f'{dataset_name}.jsonl')
logging.info(f'Saving to {output_path}')
logging.info(f'Saving golden predictions to {gold_path}')
with gold_path.open('w') as gold_file:
with output_path.open('w') as parsed_file:
for qid, (parsed, gold) in parsed_questions.items():
parsed_file.write(f'{json.dumps(parsed)}\n')
for pred in map(json.dumps, gold):
gold_file.write(f'{pred}\n')
if __name__ == '__main__':
FLAGS = flags.FLAGS
_DATASET_NAME = flags.DEFINE_string('dataset_name', None, help='Dataset name')
_INPUT_PATH = flags.DEFINE_string('input_path', None, help='Input path')
_DEBUG = flags.DEFINE_bool('debug', False, help='Debug')
_DISABLE_FIXES = flags.DEFINE_bool('disable_fixes',
False,
help='Disable fixes')
_DEBUG_QID = flags.DEFINE_string('debug_question',
None,
help='Single question id to debug.')
def convert_main(_):
FLAGS['alsologtostderr'].value = True
convert_dataset(
dataset_name=_DATASET_NAME.value,
input_path=Path(_INPUT_PATH.value),
disable_fixes=_DISABLE_FIXES.value,
debug=_DEBUG.value,
debug_question=_DEBUG_QID.value,
)
flags.mark_flags_as_required([_DATASET_NAME.name, _INPUT_PATH.name])
app.run(convert_main)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common fixtures for testing."""
import json
import pathlib
import pytest
from babelcode.data_types.prediction import Prediction
from babelcode.schema_parsing.schema_type import SchemaType
from babelcode.utils import FIXTURES_PATH
@pytest.fixture()
def fixtures_root():
yield pathlib.Path(__file__).parent
@pytest.fixture()
def code_dir(fixtures_root):
yield fixtures_root.joinpath('code')
@pytest.fixture()
def py_failing(code_dir):
yield code_dir.joinpath('failing.py')
@pytest.fixture()
def py_passing(code_dir):
yield code_dir.joinpath('passing.py')
@pytest.fixture
def sample_execution_results():
sample_file = FIXTURES_PATH.joinpath('sample_prediction_results.jsonl')
out = []
for line in map(json.loads, sample_file.open()):
stdout_str = '\n'.join(
f'TEST-{i}...{v}' for i, v in enumerate(line['test_results']))
pred_id = str(line['id'])
qid = str(line['qid'])
    fp = pathlib.Path(f'{qid}_{pred_id}.test')
pred_info = Prediction(id=pred_id, qid=qid, lang=line['lang'], file_path=fp)
had_error = len(line['stderr']) > 0
out.append(
dict(
prediction=pred_info.to_dict(),
commands=['testing'],
stdout=stdout_str,
stderr=line['stderr'],
return_code=1 if had_error else 0,
net_runtime=line['net_runtime'],
command_runtimes=line['command_runtimes'],
last_ran_command_idx=0,
had_error=had_error,
))
yield out
@pytest.fixture
def sample_question_info():
yield json.loads(FIXTURES_PATH.joinpath('sample_questions.json').read_text())
@pytest.fixture()
def sample_schema():
yield {
'arg0': SchemaType.from_generic_type_string('list<list<string>>'),
'arg1': SchemaType.from_generic_type_string('boolean'),
'expected': SchemaType.from_generic_type_string('integer'),
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
raise ValueError('This should fail!')
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
print('TEST-0...PASSED')
sys.exit(0)
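# This fixture mimics the reporting protocol expected on stdout: one
# 'TEST-<idx>...<outcome>' line per test case (see the execution tests, which
# assert stdout == 'TEST-0...PASSED\n' for this program).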
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for code_generator.py."""
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
import math
import pathlib
import pytest # pylint: disable=unused-import
from babelcode import code_generator
from babelcode import data_types
from babelcode import schema_parsing
SchemaType = schema_parsing.SchemaType
Question = data_types.Question
@pytest.mark.parametrize('schema_type',
['float', 'double', 'list<float>', 'string'])
def test_determine_question_requirements(schema_type):
"""Tests determing question specific requirements."""
double_precision = 1e-10
float_precision = 1e-5
question = data_types.Question(
qid='0',
schema=[
{
'name': 'arg0',
'type': schema_type
},
{
'name': data_types.EXPECTED_KEY_NAME,
'type': schema_type
},
],
title='testing',
test_list=[],
entry_fn_name='test',
)
schema = {
'arg0':
SchemaType.from_generic_type_string(schema_type),
data_types.EXPECTED_KEY_NAME:
SchemaType.from_generic_type_string(schema_type),
}
result = code_generator._determine_question_requirements(
question,
schema,
double_precision=double_precision,
float_precision=float_precision,
)
if schema_type == 'float':
assert math.isclose(result['precision'], float_precision)
assert result['evaluation_method'] == 'float'
assert result['use_float']
elif schema_type == 'double':
assert math.isclose(result['precision'], double_precision)
assert result['evaluation_method'] == 'float'
assert not result['use_float']
else:
assert result['evaluation_method'] == 'default'
def test_load_template_map(tmp_path: pathlib.Path):
"""Tests the loading of templates."""
template_map = {
'HEADER': 'header.txt',
'MAIN': 'main.txt',
'EVALUATION': 'evaluation.txt',
}
for k in template_map:
template_map[k] = tmp_path.joinpath(template_map[k])
with template_map[k].open('w') as f:
f.write(f'{k}: ' + '{{inputs}}')
result_map = code_generator.load_template_map(template_map)
assert set(result_map.keys()) == {'HEADER', 'MAIN', 'EVALUATION'}
for k, template in result_map.items():
result = template.render(inputs='testing')
assert result == f'{k}: testing'
def test_naive_obfuscation():
schema = {
'params': [
{
'name': 'always_money_in',
'type': 'integer'
},
{
'name': 'testing',
'type': 'boolean'
},
],
'return': {
'type': 'string'
},
}
tests = [
{
'idx': 0,
'inputs': {
'always_money_in': 1,
'testing': True
},
'outputs': 'test',
},
{
'idx': 1,
'inputs': {
'always_money_in': 2,
'testing': False
},
'outputs': 'test',
},
]
question = Question('1',
schema=schema,
test_list=tests,
entry_fn_name='test',
title='Test')
expected = Question(
'1',
schema={
'params': [
{
'name': 'arg0',
'type': 'integer'
},
{
'name': 'arg1',
'type': 'boolean'
},
],
'return': {
'type': 'string'
},
},
test_list=[
{
'idx': 0,
'inputs': {
'arg0': 1,
'arg1': True
},
'outputs': 'test',
},
{
'idx': 1,
'inputs': {
'arg0': 2,
'arg1': False
},
'outputs': 'test',
},
],
entry_fn_name='model_prediction',
entry_cls_name='Prediction',
title='Test',
use_type_annotation=True,
)
result = code_generator.naive_obfuscation(question,
force_type_annotation=True)
assert result == expected
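# The expected Question above documents the obfuscation contract this test
# pins down: parameter names collapse to arg0/arg1, the entry function is
# renamed to 'model_prediction', and the entry class to 'Prediction'.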
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test configuration module."""
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
import json
import pathlib
import shutil
import pytest
from babelcode import data_types
from babelcode.data_types.prediction import Prediction
from babelcode.schema_parsing.schema_type import SchemaType
from babelcode.utils import FIXTURES_PATH
@pytest.fixture()
def code_dir():
"""Code directory fixture."""
yield FIXTURES_PATH.joinpath('code')
@pytest.fixture()
def py_failing(code_dir):
"""Failing Python Program fixture."""
yield code_dir.joinpath('failing.py')
@pytest.fixture()
def py_passing(code_dir):
"""Passing Python Program fixture."""
yield code_dir.joinpath('passing.py')
@pytest.fixture
def sample_execution_results():
"""Sample execution results fixture."""
sample_file = FIXTURES_PATH.joinpath('sample_prediction_results.jsonl')
out = []
for line in map(json.loads, sample_file.open()):
stdout_str = '\n'.join(
f'TEST-{i}...{v}' for i, v in enumerate(line['test_results']))
pred_id = str(line['id'])
qid = str(line['qid'])
    fp = pathlib.Path(f'{qid}_{pred_id}.test')
pred_info = Prediction(id=pred_id, qid=qid, lang=line['lang'], file_path=fp)
had_error = len(line['stderr']) > 0 # pylint:disable=g-explicit-length-test
out.append(
dict(
prediction=pred_info.to_dict(),
commands=['testing'],
stdout=stdout_str,
stderr=line['stderr'],
return_code=1 if had_error else 0,
net_runtime=line['net_runtime'],
command_runtimes=line['command_runtimes'],
last_ran_command_idx=0,
had_error=had_error,
))
yield out
@pytest.fixture
def sample_question_info():
"""Sample question info fixture."""
yield json.loads(FIXTURES_PATH.joinpath('sample_questions.json').read_text())
@pytest.fixture()
def sample_schema():
"""Sample schema fixture."""
yield {
'arg0':
SchemaType.from_generic_type_string('list<list<string>>'),
'arg1':
SchemaType.from_generic_type_string('boolean'),
data_types.EXPECTED_KEY_NAME:
SchemaType.from_generic_type_string('integer'),
}
@pytest.fixture()
def passing_prediction(tmp_path, py_passing):
pass_path = tmp_path.joinpath('passing')
pass_path.mkdir(parents=True)
tmp_file = pass_path.joinpath(py_passing.name)
shutil.copyfile(py_passing, tmp_file)
yield Prediction('1',
'PASS',
'Python',
code=py_passing.read_text(),
file_path=tmp_file)
@pytest.fixture()
def failing_prediction(tmp_path, py_failing):
fail_path = tmp_path.joinpath('failing')
fail_path.mkdir(parents=True)
tmp_file = fail_path.joinpath(py_failing.name)
shutil.copyfile(py_failing, tmp_file)
yield Prediction('1',
'FAIL',
'Python',
code=py_failing.read_text(),
file_path=tmp_file)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_execution."""
from babelcode import run_execution
from babelcode.data_types.prediction import Prediction
from babelcode.languages import LanguageRegistry
def test_setup_language_code_dirs(tmp_path):
preds = [{
'id': '1',
'qid': '1',
'code': 'Pred 1.1',
'entry_fn_name': 'testd'
}, {
'id': '2',
'qid': '1',
'code': 'Pred 1.2',
'entry_fn_name': 'tesrt'
}, {
'id': '1',
'qid': '2',
'code': 'Pred 2.1',
'entry_fn_name': 'teqst',
}, {
'id': '1',
'qid': '10',
'code': 'Pred 10.1',
'entry_fn_name': 'tsest'
}]
preds = {f'{v["qid"]}_{v["id"]}': v for v in preds}
questions = {
'1': {
'qid': 1,
'test_code': 'Question 1 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution'
},
'2': {
'qid': '2',
'test_code': 'Question 2 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution'
},
'3': {
'qid': 3,
'test_code': 'Question 3 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution'
}
}
out_path = tmp_path.joinpath('out')
out_path.mkdir()
result = run_execution.setup_language_code_dirs(
out_path, LanguageRegistry.get_language('C++'), preds, questions, True)
expected_1 = Prediction('1',
'1',
'C++',
'Pred 1.1',
out_path.joinpath('1_1', '1_1.cpp'),
entry_fn_name='test')
expected_2 = Prediction('2',
'1',
'C++',
'Pred 1.2',
out_path.joinpath('1_2', '1_2.cpp'),
entry_fn_name='test')
expected_3 = Prediction('1',
'2',
'C++',
'Pred 2.1',
out_path.joinpath('2_1', '2_1.cpp'),
entry_fn_name='test')
expected = {
'1/1': {
'prediction': expected_1,
'full_code': 'Question 1 Pred 1.1 test'
},
'1/2': {
'prediction': expected_2,
'full_code': 'Question 1 Pred 1.2 test'
},
'2/1': {
'prediction': expected_3,
'full_code': 'Question 2 Pred 2.1 test'
}
}
assert result == expected
for v in expected.values():
assert v['prediction'].file_path.exists()
contents = v['prediction'].file_path.read_text()
assert contents == v['full_code']
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the driver functions."""
import json
import pathlib
from unittest import mock
import pytest # pylint: disable=unused-import
from babelcode import code_generator
from babelcode import data_types
from babelcode import drivers
from babelcode import languages
def test_generate_code_for_questions(sample_question_info):
language = languages.LanguageRegistry.get_language('Python')
questions = [
data_types.Question.from_dict(d) for d in sample_question_info.values()
]
with mock.patch('babelcode.schema_parsing.parse_schema_and_input_order'
) as mock_parse_schema:
mock_parse_schema.return_value = ('MOCK_SCHEMA', 'MOCK_INPUT_ORDER')
with mock.patch(
'babelcode.drivers._generate_question_code') as mock_generate:
mock_generate.return_value = 'MOCK_QUESTION'
with mock.patch('babelcode.drivers.generate_prompt_info') as mock_prompt:
mock_prompt.return_value = 'MOCK_PROMPT'
result, result_failures = drivers.generate_code_for_questions(
questions, language)
assert len(result) == 3
assert all(r == ('MOCK_QUESTION', 'MOCK_PROMPT') for r in result)
assert not result_failures
assert mock_generate.call_count == len(questions)
for i, mock_call_args in enumerate(mock_generate.call_args_list):
call_kwargs = mock_call_args.kwargs
assert set(call_kwargs.keys()) == {
'question',
'schema',
'input_order',
'literal_translator',
'template_map',
'prompt_translator',
}
assert call_kwargs['question'] == questions[i]
assert call_kwargs['schema'] == 'MOCK_SCHEMA'
assert call_kwargs['input_order'] == 'MOCK_INPUT_ORDER'
assert mock_parse_schema.call_count == len(questions)
assert all(v.kwargs['raw_schema'] == questions[i].schema
for i, v in enumerate(mock_parse_schema.call_args_list))
def test_generate_prompt_info():
question = data_types.Question(
'1',
schema={
'params': [{
'name': 'scooby',
'type': 'integer'
},],
'return': {
'type': 'string'
},
},
test_list=[{
'idx': 0,
'inputs': {
'scooby': 1
},
'outputs': 'test',
}],
entry_fn_name='entry',
entry_cls_name='Solution',
title='Test',
use_type_annotation=True,
text='Testing the C++ Question Prompts for entry',
)
lang = languages.LanguageRegistry.get_language('Python')
prompt_translator = lang.make_prompt_translator()
template_map = code_generator.load_template_map(lang.make_template_map())
result = drivers.generate_prompt_info(
question,
language=lang,
prompt_translator=prompt_translator,
template_map=template_map,
force_type_annotations=False,
obfuscation_fn=code_generator.naive_obfuscation,
)
assert set(result.keys()) == {
'qid',
'arguments',
'signature',
'signature_with_docstring',
'text',
'header',
'entry_fn_name',
'entry_cls_name',
}
assert result['qid'] == question.qid
assert result['signature'] == 'def model_prediction(arg0: int) -> str:'
assert (
result['text'] == 'Testing the C++ Question Prompts for model_prediction')
assert result['entry_fn_name'] == 'model_prediction'
assert result['entry_cls_name'] == 'Prediction'
def test_setup_language_code_dirs(tmp_path):
preds = [
{
'id': '1',
'qid': '1',
'code': 'Pred 1.1',
'entry_fn_name': 'testd'
},
{
'id': '2',
'qid': '1',
'code': 'Pred 1.2',
'entry_fn_name': 'tesrt'
},
{
'id': '1',
'qid': '2',
'code': 'Pred 2.1',
'entry_fn_name': 'teqst',
},
{
'id': '1',
'qid': '10',
'code': 'Pred 10.1',
'entry_fn_name': 'tsest'
},
]
preds = {f'{v["qid"]}/{v["id"]}': v for v in preds}
questions = {
'1': {
'qid': 1,
'test_code': 'Question 1 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution',
},
'2': {
'qid': '2',
'test_code': 'Question 2 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution',
},
'3': {
'qid': 3,
'test_code': 'Question 3 PLACEHOLDER_CODE_BODY PLACEHOLDER_FN_NAME',
'entry_fn_name': 'test',
'entry_cls_name': 'Solution',
},
}
out_path = tmp_path.joinpath('out')
out_path.mkdir()
result = drivers.setup_language_code_dirs(
out_path,
languages.LanguageRegistry.get_language('C++'),
preds,
questions,
True,
)
expected_1 = data_types.Prediction(
'1',
'1',
'C++',
'Pred 1.1',
out_path.joinpath('1_1', '1_1.cpp'),
entry_fn_name='test',
)
expected_2 = data_types.Prediction(
'2',
'1',
'C++',
'Pred 1.2',
out_path.joinpath('1_2', '1_2.cpp'),
entry_fn_name='test',
)
expected_3 = data_types.Prediction(
'1',
'2',
'C++',
'Pred 2.1',
out_path.joinpath('2_1', '2_1.cpp'),
entry_fn_name='test',
)
expected = {
'1/1': expected_1,
'1/2': expected_2,
'2/1': expected_3,
}
assert result == expected
for v in expected.values():
assert v.file_path.exists()
contents = v.file_path.read_text()
assert v.code in contents
def test_load_progress_from_dir(tmp_path: pathlib.Path):
"""Tests the loading of progress from a directory."""
with tmp_path.joinpath('test.jsonl').open('w') as f:
f.write('This should not appear!')
cpp_execution_result = data_types.ExecutionResult(
data_types.Prediction(
qid='2',
id='1',
lang='C++',
code='cpp_code',
file_path=pathlib.Path('1.cpp').resolve().absolute(),
),
commands=[data_types.Command('test command')],
stdout='',
stderr='Test Stderr',
net_runtime=0.1,
command_runtimes=[None],
command_memory=[None],
return_code=1,
last_ran_command_idx=0,
)
with tmp_path.joinpath('C++_execution_results.jsonl').open('w') as f:
f.write(json.dumps(cpp_execution_result.to_dict()) + '\n')
py_execution_result = data_types.ExecutionResult(
data_types.Prediction(
qid='2',
id='1',
lang='Python',
code='cpp_code',
file_path=pathlib.Path('1.cpp').resolve().absolute(),
),
commands=[data_types.Command('test command')],
stdout='',
stderr='Test Stderr',
net_runtime=0.1,
command_runtimes=[None],
command_memory=[None],
return_code=1,
last_ran_command_idx=0,
)
with tmp_path.joinpath('Python_execution_results.jsonl').open('w') as f:
f.write(json.dumps(py_execution_result.to_dict()) + '\n')
result = drivers.load_progress_from_dir(tmp_path)
expected = {
'Python': {
'2/1': py_execution_result
},
'C++': {
'2/1': cpp_execution_result
},
}
assert result == expected
@pytest.mark.parametrize('with_progress', [True, False],
ids=['With Progress', 'No Progress'])
def test_run_execution_for_lang(tmp_path, passing_prediction, with_progress):
language = languages.LanguageRegistry.get_language('Python')
  def _make_pred(qid, pred_id):
    p_dict = passing_prediction.to_dict()
    p_dict['qid'] = qid
    p_dict['id'] = pred_id
return data_types.Prediction.from_dict(p_dict, passing_prediction.file_path,
language.name)
question_mapping = {'1': {'title': 'Test Question'}}
preds = {
'1/1': _make_pred('1', '1'),
'1/2': _make_pred('1', '2'),
'2/1': _make_pred('2', '1'),
}
expected_process_input_results = set(preds.keys())
progress = {}
if with_progress:
progress = {'1/2': preds.pop('1/2')}
with mock.patch(
'babelcode.drivers.setup_language_code_dirs') as mock_setup_dirs:
mock_setup_dirs.return_value = {
k: {
'prediction': v,
'full_code': None
} for k, v in preds.items()
}
with mock.patch('babelcode.execution.execute_predictions') as mock_execute:
mock_execute.return_value = (list(preds.values()), '1:1:1')
with mock.patch('babelcode.drivers._process_results') as mock_process:
mock_process.return_value = ['A', 'B']
result = drivers.execute_bc_predictions(
lang=language,
question_mapping=question_mapping,
raw_predictions={
**preds,
**progress
},
debug_dir_path=tmp_path,
output_path=tmp_path,
executed_predictions=progress,
seed=1,
step=1,
force_question_entry=False,
)
assert result == ['A', 'B']
passed_results = mock_process.call_args_list[0].kwargs['raw_results']
actual_ids = {f'{result.qid}/{result.id}' for result in passed_results}
assert set(
mock_setup_dirs.call_args_list[0].kwargs['predictions']) == set(
preds.keys())
assert actual_ids == expected_process_input_results
assert mock_execute.call_count == 1
assert mock_process.call_count == 1
assert mock_setup_dirs.call_count == 1
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
import math
import pathlib
import pytest # pylint: disable=unused-import
from babelcode import metrics
from babelcode.data_types import result_types
from babelcode.data_types.command import Command
from babelcode.data_types.prediction import Prediction
from babelcode.data_types.result_types import ExecutionResult
from babelcode.data_types.result_types import PredictionOutcome
from babelcode.data_types.result_types import QuestionResult
def test_calculate_metrics_from_raw_results():
pass_result = ExecutionResult(prediction=Prediction('0', '0', 'Python',
'pass_1',
pathlib.Path('Test')),
commands=[Command(['test', 'command'])],
stdout='TEST-0...PASSED\n',
stderr='',
net_runtime=1.0,
return_code=0,
last_ran_command_idx=0,
had_error=False,
command_runtimes=[1.0],
command_memory=[1])
fail_result = ExecutionResult(prediction=Prediction('1', '0', 'Python',
'fail_1',
pathlib.Path('Test')),
commands=[Command(['test', 'command'])],
stdout='TEST-0...FAILED\n',
stderr='',
net_runtime=2.0,
return_code=0,
last_ran_command_idx=0,
had_error=False,
command_runtimes=[1.0],
command_memory=[1])
fail_result_2 = ExecutionResult(prediction=Prediction('0', '1', 'Python',
'fail_2',
pathlib.Path('Test')),
commands=[Command(['test', 'command'])],
stdout='TEST-0...MISSING\n',
stderr='FAIL',
net_runtime=2.0,
return_code=0,
last_ran_command_idx=0,
had_error=True,
command_runtimes=[1.0],
command_memory=[1])
raw_results = [pass_result for _ in range(5)]
raw_results.extend([fail_result for _ in range(5)])
raw_results.extend([fail_result_2 for _ in range(7)])
question_data = {
'0': {
'test_case_ids': ['0'],
'title': 'Q1',
'tags': ['QT1']
},
'1': {
'test_case_ids': ['0'],
'title': 'Q2',
'tags': ['QT2']
}
}
result_metrics, q_results, p_results = metrics.calculate_metrics_from_raw_results(
raw_results,
question_data,
runtime='0:12:34',
seed=1,
k_vals=[1, 10, 15],
num_preds_per_question=10,
subsampling_rounds=10,
subsampling_iter_per_round=3,
tracked_pred_attrs=['net_runtime'],
include_outcome_pct=True)
assert len(q_results) == 2
assert len(p_results) == 17
assert all(f'estimate_pass@{k}' in result_metrics for k in [1, 10, 15])
assert all(f'subsampling_pass@{k}' in result_metrics for k in [1, 10, 15])
for k_val in [1, 10]:
assert isinstance(result_metrics[f'subsampling_pass@{k_val}_var'], float)
for k in [1, 10, 15]:
del result_metrics[f'estimate_pass@{k}']
del result_metrics[f'subsampling_pass@{k}']
del result_metrics[f'subsampling_pass@{k}_var']
expected_net_metrics = {
'num_predictions': 17,
'questions_passed': 1,
'num_questions': 2,
'total_runtime': '0:12:34',
'questions_passed_pct': 50.0,
'Passed': 5,
'Timed Out': 0,
'Had Error': 7,
'Had Runtime Error': 0,
'Failed Tests': 5
}
for k in result_types.PredictionOutcome:
if k == result_types.PredictionOutcome.PASSED:
continue
expected_pct = expected_net_metrics[str(k)] / len(p_results) * 100
assert f'{k}_pct' in result_metrics
assert math.isclose(result_metrics.pop(f'{k}_pct'),
expected_pct), f'{k}_pct'
assert result_metrics == expected_net_metrics
def test_calculate_question_aggregate_metrics():
"""Tests calculating aggregate metrics for questions."""
question_0_results = QuestionResult('0',
'Python',
num_test_cases=3,
num_predictions=3)
for outcome in PredictionOutcome:
bool_arr = [False] * 3
if outcome == PredictionOutcome.HAD_ERROR:
bool_arr = [False, False, True]
elif outcome == PredictionOutcome.PASSED:
bool_arr = [True, False, False]
elif outcome == PredictionOutcome.TIMED_OUT:
bool_arr = [False, True, False]
question_0_results.results[outcome] = bool_arr
question_0_results.results['net_runtime'] = [1.0, None, None]
question_0_results.results['num_tc_passed'] = [3, 0, 0]
question_0_results.specific_test_results = {
'1': {
'FAILED': 1,
'PASSED': 2,
'MISSING': 3
},
'2': {
'MISSING': 5
}
}
question_1_results = QuestionResult('1',
'Python',
num_test_cases=3,
num_predictions=3)
for outcome in PredictionOutcome:
bool_arr = [False] * 3
if outcome == PredictionOutcome.FAILED_TEST:
bool_arr = [False, True, True]
elif outcome == PredictionOutcome.HAD_ERROR:
bool_arr = [True, False, False]
question_1_results.results[outcome] = bool_arr
question_1_results.results['net_runtime'] = [1.0, 2.0, 3.0]
question_1_results.results['num_tc_passed'] = [0, 1, 2]
question_1_results.specific_test_results = {
'1': {
'FAILED': 1,
'PASSED': 2,
'MISSING': 3
}
}
question_result_dict = {'0': question_0_results, '1': question_1_results}
net_metrics, q_metrics = metrics.calculate_question_aggregate_metrics(
question_result_dict, ['net_runtime'])
expected_keys = [str(outcome) for outcome in PredictionOutcome]
expected_keys += [
'num_predictions', 'questions_passed', 'num_questions',
'questions_passed_pct'
]
assert len(q_metrics) == 2
assert set(net_metrics) == set(expected_keys)
assert net_metrics == {
'num_predictions': 6,
'questions_passed': 1,
'num_questions': 2,
'questions_passed_pct': 50.0,
'Passed': 1,
'Timed Out': 1,
'Had Error': 2,
'Had Runtime Error': 0,
'Failed Tests': 2,
}
q_metrics = list(sorted(q_metrics, key=lambda x: x['qid']))
assert q_metrics[0] == {
'qid': '0',
'language': 'Python',
'num_predictions': 3,
'Had Error': 1,
'Passed': 1,
'Timed Out': 1,
'Had Runtime Error': 0,
'Failed Tests': 0,
'net_runtime_mean': 1.0,
'net_runtime_median': 1.0,
'num_tc_passed_mean': 3.0,
'num_tc_passed_median': 3.0,
'num_passed_N_total_tests': {
'0': 2,
'1': 0,
'2': 0,
'3': 1
},
'num_results_by_test': {
'1': {
'FAILED': 1,
'PASSED': 2,
'MISSING': 3
},
'2': {
'MISSING': 5
}
}
}
assert q_metrics[1] == {
'qid': '1',
'language': 'Python',
'num_predictions': 3,
'Had Error': 1,
'Passed': 0,
'Timed Out': 0,
'Had Runtime Error': 0,
'Failed Tests': 2,
'net_runtime_mean': None,
'net_runtime_median': None,
'num_tc_passed_mean': None,
'num_tc_passed_median': None,
'num_passed_N_total_tests': {
'0': 1,
'1': 1,
'2': 1,
'3': 0
},
'num_results_by_test': {
'1': {
'FAILED': 1,
'PASSED': 2,
'MISSING': 3
}
}
}
def test_calculate_pass_metrics():
"""Tests calculating the pass@k metrics."""
question_0_results = QuestionResult('0',
'Python',
num_test_cases=3,
num_predictions=4)
for outcome in PredictionOutcome:
bool_arr = [False] * 4
if outcome == PredictionOutcome.HAD_ERROR:
bool_arr = [False, False, True, False]
elif outcome == PredictionOutcome.PASSED:
bool_arr = [True, False, False, False]
elif outcome == PredictionOutcome.TIMED_OUT:
bool_arr = [False, True, False, False]
question_0_results.results[outcome] = bool_arr
question_0_results.results['net_runtimes'] = [1.0, None, None]
question_0_results.results['num_tc_passed'] = [3, 0, 0]
question_1_results = QuestionResult('1',
'Python',
num_test_cases=3,
num_predictions=3)
for outcome in PredictionOutcome:
bool_arr = [False] * 3
if outcome == PredictionOutcome.FAILED_TEST:
bool_arr = [False, True, True]
elif outcome == PredictionOutcome.HAD_ERROR:
bool_arr = [True, False, False]
question_1_results.results[outcome] = bool_arr
question_1_results.results['net_runtimes'] = [1.0, 2.0, 3.0]
question_1_results.results['num_tc_passed'] = [0, 1, 2]
question_result_dict = {'0': question_0_results, '1': question_1_results}
result = metrics.calculate_pass_metrics(question_result_dict,
seed=1,
k_vals=[1, 10],
num_preds_per_question=4,
subsampling_rounds=10,
subsampling_iter_per_round=4,
shuffle=True)
assert result == {
'estimate_pass@1': 12.5,
'subsampling_pass@1': 8.75,
'estimate_pass@10': None,
'subsampling_pass@10': None,
'subsampling_pass@1_var': 360.9375,
'subsampling_pass@10_var': None
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing Utilities."""
import pathlib
from typing import Dict
import pytest # pylint: disable=unused-import
import yaml
from babelcode import code_generator
from babelcode import languages
from babelcode import schema_parsing
from babelcode import utils
# Comment this out and replace with [LANG_NAME] to test a single language.
LANGS_TO_TEST = schema_parsing.LanguageSchemaSpecRegistry.list_languages()
# LANGS_TO_TEST = ['Python']
CODE_DIR = pathlib.Path(utils.FIXTURES_PATH, 'language_data')
# Define these shortcuts for ease of use.
DATA_STRUCTURES = [
'TYPE_NAME_1[]',
'list<list<TYPE_NAME_1>>',
'map<TYPE_NAME_1;TYPE_NAME_1>',
'set<TYPE_NAME_1>',
]
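# For example, substituting TYPE_NAME_1 = 'integer' yields generic type
# strings such as 'integer[]' and 'map<integer;integer>' for the per-type
# tests to consume (a sketch of how these templates are filled in).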
class LanguageSpec:
"""A testing specification for a language.
This helper class defines the different testing specs for setting up
automated testing for each language.
Attributes:
name: The name of the language.
lang_dir: The directory where this languages specific features are stored.
testing_spec: The testing specification of inputs and outputs.
func_template_path: The path to the template function to use for testing.
"""
def __init__(self, name: str):
"""Initializes the language spec.
Args:
name: The name of the language.
"""
self.name = name
self.lang_dir = CODE_DIR / self.name
self.testing_spec = yaml.load(
self.lang_dir.joinpath('spec.yaml').open(), yaml.Loader)
self.func_template_path = self.lang_dir / 'func_template.txt'
def __repr__(self) -> str:
"""Gets the name of the language."""
return self.name
def __getitem__(self, key: str) -> Dict[str, schema_parsing.SchemaValueType]:
"""Gets the testing specification for key."""
return self.testing_spec[key]
def get(
self, key: str, default_value: schema_parsing.SchemaValueType
) -> schema_parsing.SchemaValueType:
"""Gets the testing specification for key and with value."""
return self.testing_spec.get(key, default_value)
LANGUAGE_SPECS = {l: LanguageSpec(name=l) for l in LANGS_TO_TEST}
class BaseLanguageTestingClass:
"""Base class for language specific tests."""
def _setup_test(self, lang_name):
"""Performs setup for the language test.
Args:
lang_name: The name of the language.
"""
self.lang_spec = LANGUAGE_SPECS[lang_name]
self.lang_name = lang_name
try:
self.lang = languages.LanguageRegistry.get_language(lang_name)
self.literal_translator = self.lang.make_literal_translator()
    except Exception:  # pylint: disable=broad-except
self.lang = None
self.literal_translator = None
try:
self.template_map = code_generator.load_template_map(
self.lang.make_template_map())
    except Exception:  # pylint: disable=broad-except
self.template_map = None
self.schema_spec = schema_parsing.LanguageSchemaSpecRegistry.get_lang_spec(
self.lang_name)
def get_primitive_data(self, convert_type: str) -> Dict[str, str]:
"""Gets the primitive language specification for a generic."""
return self.lang_spec['primitives'][convert_type]
def parse_lang_schema(
self,
schema: schema_parsing.SchemaMapType) -> schema_parsing.SchemaMapType:
"""Parses the language types from the schema."""
return schema_parsing.parse_language_schema(self.schema_spec, schema)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the execution functions."""
import json
import os
import pytest
from babelcode import execution
from babelcode import utils
from babelcode.data_types.command import Command
from babelcode.languages import LanguageRegistry
def setup_module(_):
"""Setup the environment so execution is allowed."""
os.environ['ALLOW_EXECUTION'] = 'true'
def teardown_module(_):
"""Disable execution on teardown."""
  os.environ['ALLOW_EXECUTION'] = 'false'
def make_python_commands(file_path):
return [Command(['python', file_path.name])]
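# e.g. make_python_commands(pathlib.Path('/tmp/q1/passing.py')) returns
# [Command(['python', 'passing.py'])]; only the file name is used because
# execution is assumed to run from within the prediction's own directory
# (inferred from the fixtures, which copy each program into its own folder).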
def test_execute_code_ran(passing_prediction):
result = execution.execute_code(
prediction=passing_prediction,
commands=make_python_commands(passing_prediction.file_path),
)
assert result.prediction == passing_prediction
assert result.return_code == 0
assert not result.stderr
assert result.stdout == 'TEST-0...PASSED\n'
assert result.last_ran_command_idx == 0
assert not result.had_error
assert not result.timed_out
assert result.all_commands_ran
def test_execute_code_fail(failing_prediction):
result = execution.execute_code(
prediction=failing_prediction,
commands=make_python_commands(failing_prediction.file_path),
)
assert result.prediction == failing_prediction
assert result.return_code == 1
assert 'This should fail!' in result.stderr
assert not result.stdout
assert result.last_ran_command_idx == 0
assert result.had_error
assert not result.timed_out
assert result.all_commands_ran
@pytest.mark.parametrize('num_workers', [1, 2], ids=['Single', 'Parallel'])
def test_execute_predictions(num_workers, passing_prediction,
failing_prediction, tmp_path):
prediction_list = [
passing_prediction,
failing_prediction,
passing_prediction,
failing_prediction,
]
with execution.time_limit(3):
results, runtime = execution.execute_predictions(
prediction_list,
LanguageRegistry.get_language('Python'),
tmp_path,
num_workers,
update_freq=3,
)
debug_results = list(
map(json.loads,
tmp_path.joinpath('Python_runtime_tracking.jsonl').open()))
assert len(debug_results) == 1
assert [d['completed'] for d in debug_results] == [3]
execution_results = list(
map(json.loads,
tmp_path.joinpath('Python_execution_results.jsonl').open()))
assert len(execution_results) == 4
result = []
for i, d in enumerate(results):
assert d.to_dict() == execution_results[i]
result.append((d.prediction.id, d.had_error, d.return_code))
assert set(result) == {
(passing_prediction.id, False, 0),
(failing_prediction.id, True, 1),
(passing_prediction.id, False, 0),
(failing_prediction.id, True, 1),
}
assert isinstance(runtime, str)
assert runtime.count(':') == 2
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for result data types."""
import dataclasses
import json
import math
import pathlib
import pytest
from babelcode import data_types
from babelcode.data_types import result_types
from babelcode.data_types.command import Command
from babelcode.data_types.prediction import Prediction
PredictionOutcome = result_types.PredictionOutcome
ExecutionResult = result_types.ExecutionResult
def test_read_execution_results_from_file(tmp_path: pathlib.Path):
"""Tests reading execution results from a file."""
expected_results = {
'C++': {
'1/test':
ExecutionResult(
Prediction(
qid='1',
id='test',
lang='C++',
code='cpp_code',
file_path=pathlib.Path('1.cpp').resolve().absolute(),
),
commands=[Command('test command')],
net_runtime=0.1,
stdout='Test STDOUT',
stderr='',
command_runtimes=[None],
command_memory=[None],
return_code=1,
last_ran_command_idx=0,
)
},
'Python': {
'2/test':
ExecutionResult(
Prediction(
qid='2',
id='test',
lang='Python',
code='cpp_code',
file_path=pathlib.Path('1.cpp').resolve().absolute(),
),
commands=[Command('test command')],
stdout='',
stderr='Test Stderr',
net_runtime=0.1,
command_runtimes=[None],
command_memory=[None],
return_code=1,
last_ran_command_idx=0,
),
},
}
input_path = tmp_path.joinpath('exec.jsonl')
with input_path.open('w') as f:
for i in (r for x in expected_results.values() for r in x.values()):
f.write(json.dumps(i.to_dict()) + '\n')
f.write(';\n')
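  # The stray ';' lines simulate malformed entries. The final line-count
  # assertion below implies the reader drops them, leaving only the two valid
  # JSON records in the file.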
result = result_types.read_execution_results_from_file(input_path)
assert len(result) == len(expected_results), 'Different number of results'
for lang, expected_lang_res in expected_results.items():
assert set(result.get(lang, {}).keys()) == set(
expected_lang_res.keys()), f'{lang} does not have right keys.'
expected_attrs = dataclasses.asdict(next(iter(expected_lang_res.values())))
for key, expected_exec_result in expected_lang_res.items():
actual = result[lang][key]
for k in expected_attrs:
result_value = getattr(actual, k)
expected_value = getattr(expected_exec_result, k)
        assert (result_value == expected_value
                ), f'result[{lang}][{key}].{k} does not have the correct value'
assert len(input_path.read_text().strip().split('\n')) == 2
class TestPredictionResult:
@pytest.mark.parametrize(
['return_code', 'stdout', 'stderr', 'expected'],
[
(1, '', '', PredictionOutcome.HAD_ERROR),
(0, 'TEST-0...MISSING\n', '', PredictionOutcome.HAD_ERROR),
(0, 'TEST-0...KeyError\n', '', PredictionOutcome.HAD_RUNTIME_ERROR),
(0, 'TEST-0...FAILED\n', '', PredictionOutcome.FAILED_TEST),
(0, 'TEST-0...PASSED\n', '', PredictionOutcome.PASSED),
(0, 'TEST-0...PASSED\n', 'Warnings:', PredictionOutcome.PASSED),
],
ids=[
'RTR_Fail',
'MISSING',
'RuntimeError',
'FailedTests',
'Passed',
'Passed_Warnings',
],
)
def test_process_execution_result_failures(self, return_code, stdout, stderr,
expected):
pred = Prediction(
qid='1',
id='test',
lang='C++',
code='cpp_code',
file_path=pathlib.Path('1.cpp'),
)
exec_result = result_types.ExecutionResult(
prediction=pred,
commands=[Command('test command')],
stdout=stdout,
stderr=stderr,
net_runtime=1.0,
return_code=return_code,
last_ran_command_idx=0,
had_error=return_code != 0,
command_runtimes=[0.9, None],
command_memory=[10, None],
)
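    # Execution stdout is expected to follow the 'TEST-<id>...<OUTCOME>'
    # grammar; any test case id absent from stdout is recorded as MISSING.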
if not stdout:
tc_results = {'0': 'MISSING'}
else:
tc_results = {'0': stdout.split('.')[-1].strip()}
result = result_types.PredictionResult.from_execution_result(
exec_result, {'test_case_ids': ['0']})
expected_result = result_types.PredictionResult(
qid=exec_result.prediction.qid,
id=exec_result.prediction.id,
lang=exec_result.prediction.lang,
outcome=expected,
test_case_results=tc_results,
num_tc_passed=sum(v == 'PASSED' for v in tc_results.values()),
num_tc=1,
all_commands_ran=True,
net_runtime=1.0,
code='cpp_code',
stderr=stderr,
final_command_memory=10,
final_command_runtime=0.9,
command_memory=[10, None],
command_runtimes=[0.9, None],
)
assert result == expected_result
@pytest.mark.parametrize(
['tc_results', 'expected_outcome'],
[
(
{
'1': 'PASSED',
'2': 'PASSED',
'CHEESE.BALL': 'PASSED'
},
PredictionOutcome.PASSED,
),
(
{
'1': 'PASSED',
'2': 'FAILED',
'CHEESE.BALL': 'PASSED'
},
PredictionOutcome.FAILED_TEST,
),
],
ids=['passed', 'failed_test'],
)
def test_process_execution_result_passes(self, tc_results, expected_outcome):
stdout = '\n'.join(f'TEST-{k}...{v}' for k, v in tc_results.items())
pred = Prediction(
qid='1',
id='test',
lang='C++',
code='cpp_code',
file_path=pathlib.Path('1.cpp'),
)
exec_result = result_types.ExecutionResult(
prediction=pred,
commands=[Command('test command')],
stdout=stdout,
stderr='',
net_runtime=1.0,
return_code=0,
last_ran_command_idx=1,
command_runtimes=[1.0, 2.0],
command_memory=[0, 10],
)
result: result_types.PredictionResult = (
result_types.PredictionResult.from_execution_result(
exec_result, {'test_case_ids': ['1', '2', 'CHEESE.BALL']}))
assert result.outcome == expected_outcome
assert result.test_case_results == tc_results
assert result.num_tc == 3
assert result.num_tc_passed == sum(
1 if v == 'PASSED' else 0 for v in tc_results.values())
assert result.net_runtime == 1.0
assert result.code == pred.code
assert math.isclose(result.final_command_runtime, 2.0)
assert result.final_command_memory == 10
def test_process_execution_incorrect_all_missing(self):
stdout = 'TEST-10...PASSED\nTEST-4...PASSED\nTEST-5...PASSED\n'
pred = Prediction(
qid='1',
id='test',
lang='C++',
code='cpp_code',
file_path=pathlib.Path('1.cpp'),
)
exec_result = result_types.ExecutionResult(
prediction=pred,
commands=[Command('test command')],
stdout=stdout,
stderr='',
return_code=0,
net_runtime=1.0,
last_ran_command_idx=0,
command_runtimes=[1.0],
command_memory=[10],
)
result = result_types.PredictionResult.from_execution_result(
exec_result, {'test_case_ids': ['0', '1', '2']})
assert result.outcome == PredictionOutcome.HAD_ERROR
assert result.test_case_results == {
'0': 'MISSING',
'1': 'MISSING',
'2': 'MISSING',
}
assert result.net_runtime == 1.0
assert result.code == pred.code
@pytest.fixture
def prediction_result(request) -> result_types.PredictionResult:
default_kwargs = {
'qid': '0',
'id': '0',
'lang': 'Python',
'outcome': PredictionOutcome.PASSED,
'test_case_results': {
'0': 'PASSED',
'1': 'PASSED'
},
'net_runtime': 1.1,
'num_tc_passed': 2,
'num_tc': 2,
'all_commands_ran': True,
'final_command_runtime': 1.0,
'final_command_memory': 10,
'command_runtimes': [1.0],
'command_memory': [10],
'stderr': '',
'code': 'Test Code',
}
  pred_kwargs = {}
  for k, v in default_kwargs.items():
    # Indirectly parametrized tests supply a dict of overrides through
    # `request.param` (pytest has no `request.params` attribute).
    if hasattr(request, 'param'):
      pred_kwargs[k] = request.param.get(k, v)
    else:
      pred_kwargs[k] = v
yield result_types.PredictionResult(**pred_kwargs)
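# QuestionResult (exercised below) is assumed to keep one parallel list per
# outcome and tracked attribute, appending an entry on each update_with_result
# call; get_vals_for_idx(i) then reads column i across those lists, e.g.
#   {PASSED: [True], HAD_ERROR: [False], ..., 'num_tc_passed': [2]}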
class TestQuestionResult:
def test_update_with_result(self, prediction_result):
result = result_types.QuestionResult(
'0', 'Python', 2, tracked_attributes=['final_command_memory'])
result.update_with_result(prediction_result)
assert result.num_predictions == 1
expected_keys = set(PredictionOutcome)
expected_keys.update(['final_command_memory', 'num_tc_passed'])
assert set(result.results.keys()) == expected_keys
expected_outcome = prediction_result.outcome
outcome_results = {v: result.results[v] for v in PredictionOutcome}
expected_outcome_results = {}
for v in PredictionOutcome:
if v == expected_outcome:
expected_outcome_results[v] = [True]
else:
expected_outcome_results[v] = [False]
assert outcome_results == expected_outcome_results
assert result.results['final_command_memory'] == [
prediction_result.final_command_memory
]
assert len(result.specific_test_results) == prediction_result.num_tc
for k, v in prediction_result.test_case_results.items():
assert k in result.specific_test_results, f'Missing test "{k}"'
assert result.specific_test_results[k] == {
v: 1
}, f'{k} does not have correct value.'
def test_get_vals_for_idx(self, prediction_result):
q_result = result_types.QuestionResult(
'0', 'Python', 2, tracked_attributes=['final_command_memory'])
q_result.update_with_result(prediction_result)
result = q_result.get_vals_for_idx(0)
expected_results = {k: False for k in PredictionOutcome}
expected_results[PredictionOutcome.PASSED] = True
expected_results['final_command_memory'] = 10
expected_results['num_tc_passed'] = 2
assert result == expected_results
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for question data types."""
import copy
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
import json
import pathlib
from typing import Dict, Tuple
import pytest
from babelcode import data_types
Question = data_types.Question
@pytest.fixture()
def expected_schema():
"""Expected schema fixture."""
yield {
'params': [{
'name': 'arg',
'type': 'integer'
},],
'return': {
'type': 'string'
},
}
@pytest.fixture()
def expected_tests():
"""Expected tests fixture."""
yield [{'idx': 0, 'inputs': {'arg': 1}, 'outputs': 'test'}]
@pytest.fixture()
def schema_dict_question(expected_schema, expected_tests):
"""Fixture for a question from a schema dict."""
input_dict = {
'qid': 4,
'title': 'Valid schema dict',
'schema': {
'params': [{
'name': 'arg',
'type': 'integer'
}],
'return': {
'type': 'string'
},
},
'test_list': [{
'idx': 0,
'inputs': {
'arg': 1
},
'outputs': 'test'
}],
'entry_fn_name': 'test_dict',
'text': 'This question has NL',
}
question = Question(
qid='4',
title='Valid schema dict',
schema=expected_schema,
test_list=expected_tests,
entry_fn_name='test_dict',
text='This question has NL',
)
yield (input_dict, question)
# Don't make this into a fixture so we can parametrize it.
INVALID_QUESTION_DICTS = [
{
'qid': 1,
'title': 'Fail no schema dict',
'schema': {}
},
{
'qid': 2,
'title': 'Fail no schema list',
'schema': [{
'name': '1'
}],
},
{
'qid': 3,
'title': 'Fail wrong schema type',
'schema': 1
},
]
# Disable pylint bare generics because otherwise we would have a massive,
# unreadable type annotation.
# pylint:disable=g-bare-generic
def test_read_input_questions(
tmp_path: pathlib.Path,
schema_dict_question: Tuple[Dict, Question],
):
"""Test the read_input_questions function."""
data_path = pathlib.Path(tmp_path, 'questions.jsonl')
input_questions = [
*INVALID_QUESTION_DICTS,
schema_dict_question[0],
]
with data_path.open('w') as f:
for q in input_questions:
f.write(json.dumps(q) + '\n')
result, failed = data_types.read_input_questions(data_path)
  # Failed entries hold the raw line dictionaries, so their qids must be
  # converted to strings before comparing.
failed_ids = {str(q[0]['qid']) for q in failed}
assert failed_ids == {'1', '2', '3'}
expected = [schema_dict_question[1]]
assert result == expected
class TestQuestion:
# Disable pylint bare generics because otherwise we would have a massive,
# unreadable type annotation.
# pylint:disable=g-bare-generic
def test_from_dict_schema_dict(self, schema_dict_question: Tuple[Dict,
Question]):
"""Test the from_dict function with a schema dict."""
input_dict, expected = schema_dict_question
assert Question.from_dict(input_dict) == expected
@pytest.mark.parametrize(
'input_dict',
INVALID_QUESTION_DICTS,
ids=[d['title'] for d in INVALID_QUESTION_DICTS],
)
def test_from_dict_failures(self, input_dict):
"""Test that failures raise QuestionError."""
with pytest.raises(data_types.QuestionParsingError):
_ = Question.from_dict(input_dict)
def test_change_var_names(self):
schema = {
'params': [
{
'name': 'always_money_in',
'type': 'integer'
},
{
'name': 'testing',
'type': 'boolean'
},
],
'return': {
'type': 'string'
},
}
tests = [
{
'idx': 0,
'inputs': {
'always_money_in': 1,
'testing': True
},
'outputs': 'test',
},
{
'idx': 1,
'inputs': {
'always_money_in': 2,
'testing': False
},
'outputs': 'test',
},
]
expected_change_schema = copy.deepcopy(schema)
expected_change_schema['params'][1] = {
'name': 'the_banana_stand',
'type': 'boolean',
}
expected_change_tests = copy.deepcopy(tests)
for i in range(len(expected_change_tests)):
expected_change_tests[i]['inputs'][
'the_banana_stand'] = expected_change_tests[i]['inputs'].pop(
'testing')
question = Question('1',
schema=schema,
test_list=tests,
entry_fn_name='test',
title='Test')
question.change_var_names({'testing': 'the_banana_stand'})
expected_question = Question(
'1',
schema=expected_change_schema,
test_list=expected_change_tests,
entry_fn_name='test',
title='Test',
)
assert question == expected_question
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the prediction data type."""
import pathlib
from babelcode import data_types
class TestPrediction:
"""Tests for the prediction data type."""
def test_prediction_from_dict(self):
"""Test `from_dict()`."""
file_path = pathlib.Path('test.py')
input_dict = {
'id': 1,
'qid': 2,
'code': 'Prediction code',
'entry_fn_name': 'test_fn',
'entry_cls_name': 'test_cls'
}
result = data_types.Prediction.from_dict(pred_dict=input_dict,
file_path=file_path,
default_language='vba')
assert result == data_types.Prediction(id='1',
qid='2',
code='Prediction code',
entry_fn_name='test_fn',
entry_cls_name='test_cls',
lang='vba',
file_path=file_path)
def test_prediction_to_dict(self):
"""Test `to_dict()`."""
result = data_types.Prediction(id='1',
qid='2',
code='Prediction code',
entry_fn_name='test_fn',
entry_cls_name='test_cls',
lang='vba',
file_path=pathlib.Path('test.py')).to_dict()
assert result == {
'id': '1',
'qid': '2',
'code': 'Prediction code',
'entry_fn_name': 'test_fn',
'entry_cls_name': 'test_cls',
'lang': 'vba',
'file_path': str(pathlib.Path('test.py').resolve().absolute())
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for primitive_translator."""
from typing import Any, List
import pytest
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
SchemaType = schema_parsing.SchemaType
class DummyLiteralTranslator(translation.LiteralTranslator):
"""Dummy LiteralTranslator for testing."""
def format_list(self, generic_type: SchemaType,
list_values: List[Any]) -> str:
"""Format the list of values to the code to initialize the list.
Args:
generic_type: The underlying schema type for the list.
list_values: The list of strings that are the literal initialization code
for each element of the list.
Returns:
The code to initialize a list object in the current language.
"""
return f'({generic_type.to_generic()}, list[{", ".join(list_values)}])'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Format the list of values to the code to initialize the set.
Args:
generic_type: The underlying schema type for the list.
set_values: The list of strings that are the literal initialization code
for each element of the set.
Returns:
The code to initialize a set object in the current language.
"""
# Some languages require the generic_type to create the set.
return f'({generic_type.to_generic()}, set[{", ".join(set_values)}])'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Format the map with keys and entries to the code to initialize the map.
We include the `key_type` and `value_type` for languages that require them
    to initialize the map (i.e. Golang).
Args:
key_type: The SchemaType of the key_type.
value_type: The SchemaType of the value.
entries: The list of code to initialize the entries.
Returns:
      The code to initialize a map object in the current language.
"""
return (f'({key_type.to_generic()}, {value_type.to_generic()},'
f' {", ".join(entries)})')
def format_map_entry(self, key: str, value: str) -> str:
"""Format a single map entry to the literal code.
Args:
key: The code to initialize the key.
value: The code to initialize the value.
Returns:
The code to make the single entry.
"""
return f'[{key}|{value}]'
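# Composing the dummy formatters, a map<string;integer> literal {'1': 1} would
# render as '(string, integer, ["1"|1])': format_map_entry produces each
# '[key|value]' pair and format_map wraps them with the generic key and value
# types. The tests below rely on exactly this composition.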
class TestLiteralTranslator:
def setup_method(self):
"""Setup for each test."""
self.literal_translator = DummyLiteralTranslator(
'Testing',
naming_convention=utils.NamingConvention.CAMEL_CASE,
convert_primitive_fn=translation.make_primitive_translator({}),
)
def test_generate_test_case_literals(self):
"""Test the generate test case literals function."""
input_tc = {
'idx': '0',
'inputs': {
'arg0': 1,
'arg2': 'Hello World\n'
},
'outputs': 1e-2,
}
schema = {
'arg0': SchemaType('integer'),
'arg2': SchemaType('string'),
data_types.EXPECTED_KEY_NAME: SchemaType('float'),
}
input_order = ['arg2', 'arg0']
result = self.literal_translator.generate_test_case_literals(
'1', input_tc, schema, input_order)
assert result == {
'idx': '0',
'inputs': ['"Hello World\\n"', '1'],
'outputs': '0.01',
}
def test_convert_primitive_to_literal(self):
"""Test the convert primitive to literal."""
generic_type = SchemaType('boolean')
result = self.literal_translator.convert_var_to_literal(generic_type, False)
assert result == 'false'
@pytest.mark.parametrize('type_str', ['set', 'list'])
def test_convert_array_like_to_literal(self, type_str):
"""Test converting array like datastructures to literal code."""
generic_type = SchemaType.from_generic_type_string(f'{type_str}<integer>')
input_values = [1, 2, 1]
result = self.literal_translator.convert_var_to_literal(
generic_type, input_values)
expected_values = input_values
if type_str == 'set':
expected_values = set(input_values)
expected_values = ', '.join(map(str, expected_values))
expected = f'({generic_type.to_generic()}, {type_str}[{expected_values}])'
assert result == expected
def test_convert_map_to_literal(self):
"""Test converting maps to literal code."""
generic_type = SchemaType.from_generic_type_string('map<string;integer>')
input_values = {'1': 1, '2': 2}
result = self.literal_translator.convert_var_to_literal(
generic_type, input_values)
expected_value = '["1"|1], ["2"|2]'
expected = '(string, integer, ' + expected_value + ')'
assert result == expected
def test_convert_list_of_sets(self):
"""Test list of sets."""
generic_type = SchemaType.from_generic_type_string('list<set<integer>>')
input_values = [[1, 2]]
expected = '(list<set<integer>>, list[(set<integer>, set[1, 2])])'
result = self.literal_translator.convert_var_to_literal(
generic_type, input_values)
assert result == expected
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for prompt_translator."""
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
from typing import Dict
import pytest
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
Question = data_types.Question
class DummyPromptTranslator(translation.PromptTranslator):
@property
def word_replacement_map(self) -> Dict[str, str]:
return {'vector': ['list'], 'map': ['dict', 'dictionary', 'dictionaries']}
@property
def signature_template(self) -> str:
return ('{%- if docstring is not none -%}{{docstring~"\n"}}{%-endif-%}' +
' def {{entry_cls_name}} {{entry_fn_name}}' +
'({{signature}}){{return_type}}:{{params|join(", ")}}')
def clean_docstring_for_lang(self, docstring: str) -> str:
return docstring.replace('.', '?')
def translate_entry_function_name(
self,
entry_fn_name: str,
) -> str:
return entry_fn_name
def translate_entry_cls_name(self, entry_cls_name: str) -> str:
return entry_cls_name
def format_docstring_for_lang(self, docstring: str) -> str:
return '| '.join(docstring.splitlines(True)).strip()
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
if use_type_annotation:
return f'{arg_name}: {arg_type.lang_type}'
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
if use_type_annotation:
return f' -> {return_type.lang_type}'
return ''
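# With this dummy, a signature such as (arg1: Bool, arg0: Int) -> double is
# rendered through signature_template as
#   'def SOL TEST_FUNCTION(arg1: Bool, arg0: Int) -> double:arg1, arg0'
# which is exactly what test_translate_signature asserts below.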
@pytest.fixture(scope='module')
def schema():
yield {
'arg0': SchemaType('integer', 'Int'),
'arg1': SchemaType('boolean', 'Bool'),
data_types.EXPECTED_KEY_NAME: SchemaType('float', 'double')
}
@pytest.fixture(scope='module')
def input_order():
yield ['arg1', 'arg0']
class TestPromptTranslator:
def setup_method(self):
self.translator = DummyPromptTranslator('testing',
utils.NamingConvention.SNAKE_CASE)
def test_translate_prompt(self):
prompt = """This is a test prompt with Lists. Testing for Python?
It should dict be formatted properly!"""
result = self.translator.translate_prompt('python', prompt, 'test')
assert result == """This is a test prompt with Vectors? Testing for Testing?
It should map be formatted properly!"""
def test_translate_signature(self, schema, input_order):
result = self.translator.translate_signature('TEST_FUNCTION', 'SOL', schema,
input_order, True)
expected = ('def SOL TEST_FUNCTION(arg1: Bool, arg0: Int) -> double:' +
'arg1, arg0')
assert result == expected
def test_translate_docstring(self, schema, input_order):
docstring = '\nTest\nTest line 2\n'
result = self.translator.translate_signature_with_docstring(
'Python', docstring, 'test', 'sol', schema, input_order, False)
assert result == ('| Test\n| Test line 2\ndef sol test(arg1, arg0):arg1, '
'arg0')
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for primitive_translator."""
import pytest
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
SchemaType = schema_parsing.SchemaType
GENERIC_TO_IO = {
'integer': (1, '1'),
'boolean': (True, 'true'),
'float': (1.0, '1.0'),
'string': ('Test string', '"Test string"'),
'character': ('t', "'t'"),
'double': (1.0, '1.0'),
'long': (1, '1'),
}
@pytest.mark.parametrize(['generic', 'value', 'expected'],
[(k, v[0], v[1]) for k, v in GENERIC_TO_IO.items()],
ids=list(GENERIC_TO_IO))
def test_make_primitive_translator_defaults(
generic: str, value: schema_parsing.SchemaValueType, expected: str):
"""Test the default conversions of the primitive converter."""
primitive_fn = translation.make_primitive_translator({})
result = primitive_fn(SchemaType(generic), value)
assert result == expected
@pytest.mark.parametrize(['generic', 'value'],
[(k, v[0]) for k, v in GENERIC_TO_IO.items()],
ids=list(GENERIC_TO_IO))
def test_make_primitive_translator_overrides(
generic: str, value: schema_parsing.SchemaValueType):
"""Test that override the conversion method only changes the specified type.
"""
override_fn = lambda v: f'{type(v).__name__} ==> {v}'
primitive_fn = translation.make_primitive_translator({generic: override_fn})
result = primitive_fn(SchemaType(generic), value)
assert result == override_fn(value)
for k, v in GENERIC_TO_IO.items():
if k == generic:
continue
assert primitive_fn(
SchemaType(k),
v[0]) == v[1], f'{k} was overridden when it should not have been.'
def test_make_primitive_translator_not_supported():
"""Test that an error is raised for unsupported primitives."""
primitive_fn = translation.make_primitive_translator({})
with pytest.raises(data_types.IOPairError):
_ = primitive_fn(SchemaType('unsupported'), 1)
@pytest.mark.parametrize('input_value', [1, 2.45, 0.22])
def test_convert_float(input_value):
"""Tests the convert float method."""
expected = str(float(input_value))
assert translation.convert_float(input_value) == expected
suffix = 'f'
assert translation.convert_float(input_value, suffix) == f'{expected}{suffix}'
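# Usage sketch: translation.convert_float(2.45) -> '2.45', while passing a
# suffix appends it verbatim, e.g. convert_float(2.45, 'f') -> '2.45f' (as the
# parametrized cases above exercise).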
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parsing questions."""
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
import ast
import copy
import json
import os
import pathlib
import pytest
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import utils as bc_utils
from babelcode.dataset_conversion import assertion_parsing
from babelcode.dataset_conversion import question_parsing
SchemaType = schema_parsing.SchemaType
ERROR_TESTS = {}
VALID_TESTS = {}
TEST_DATA_PATH = bc_utils.FIXTURES_PATH.joinpath(
'question_parsing_testdata.json')
for test_name, test_info in json.loads(TEST_DATA_PATH.read_text()).items():
if test_info.get('expected_error', None):
ERROR_TESTS[test_name] = [test_info['input'], test_info['expected_error']]
else:
VALID_TESTS[test_name] = [test_info['input'], test_info['expected']]
@pytest.fixture()
def valid_errors():
yield (
data_types.QuestionParsingError,
data_types.QuestionValidationError,
assertion_parsing.AssertionToSchemaError,
assertion_parsing.LiteralParsingError,
data_types.IOPairError,
)
def pytest_generate_tests(metafunc):
test_name = metafunc.function.__name__
if 'parse_question' in test_name:
argnames = ['question_dict', 'expected']
if 'valid' in test_name:
tests_to_use = VALID_TESTS
else:
tests_to_use = ERROR_TESTS
ids = []
arg_values = []
for test_id, test_values in copy.deepcopy(tests_to_use).items():
ids.append(test_id)
arg_values.append(test_values)
metafunc.parametrize(argnames=argnames, argvalues=arg_values, ids=ids)
def test_parse_question_valid(question_dict, expected):
test_code = question_dict.pop('test_list')
question_dict['testing_code'] = '\n'.join(test_code)
result = question_parsing.parse_question_dict(**question_dict)
assert set(result.keys()) == set(expected.keys())
assert result['entry_fn_name'] == expected['entry_fn_name']
assert result['schema'] == expected['schema']
assert len(result['test_list']) == len(expected['test_list'])
for i in range(len(result['test_list'])):
l = result['test_list'][i]
r = expected['test_list'][i]
assert l == r, f'{i=} {l=} {r=}'
def test_parse_question_error(question_dict, expected, valid_errors):
test_code = question_dict.pop('test_list')
question_dict['testing_code'] = '\n'.join(test_code)
with pytest.raises(valid_errors) as e:
_ = question_parsing.parse_question_dict(**question_dict)
assert expected in str(e)
@pytest.mark.parametrize(
['input_str', 'expected'],
[
('List[int]', 'list<integer>'),
('Dict[str,List[bool]]', 'map<string;list<boolean>>'),
('Tuple[List[str],int]', 'tuple<list<string>|integer>'),
],
)
def test_convert_type_annotation(input_str, expected):
node = ast.parse(input_str).body[0].value
result = question_parsing._convert_type_annotation_to_schema(node)
assert result == expected
@pytest.mark.parametrize(
['input_str'],
[('float[int]',), ('List',), ('List[str,str]',), ('Dict[str]',)],
)
def test_convert_type_annotation_error(input_str):
node = ast.parse(input_str).body[0].value
with pytest.raises(question_parsing.utils.AnnotationError):
_ = question_parsing._convert_type_annotation_to_schema(node)
@pytest.mark.parametrize(
['input_str', 'expected'],
[
('list<null>', 'list<integer>'),
('list<integer>', 'list<integer>'),
(None, 'list<integer>'),
],
)
def test_get_final_schema_type(input_str, expected):
schema_types = [
question_parsing.PotentialType(SchemaType.from_generic_type_string(s), s,
1, 1) for s in ['list<integer>']
]
result = question_parsing._get_final_schema_type('test', 'test', schema_types,
input_str)
assert result == expected
@pytest.mark.parametrize(
['arg_types', 'return_type'],
[
(['list<integer>', 'string'], 'list<boolean>'),
(['null', 'null'], 'null'),
([None, None], None),
],
)
def test_consolidate_schema_from_test_cases(arg_types, return_type):
test_cases = {
0: {
'inputs': [[1], 'typing'],
'outputs': [],
'schema': {
'params': [
('list<integer>', 1),
('string', 0),
],
'returns': ('list<null>', 1),
},
},
1: {
'inputs': [[], 'typing'],
'outputs': [True],
'schema': {
'params': [
('list<null>', 1),
('string', 0),
],
'returns': ('list<boolean>', 1),
},
},
2: {
'inputs': [[], ''],
'outputs': [],
'schema': {
'params': [
('list<null>', 1),
('null', 0),
],
'returns': ('list<null>', 1),
},
},
}
args = ['a', 'b']
found_types = {v: arg_types[i] for i, v in enumerate(args)}
(
result_arg_types,
result_return_type,
) = question_parsing.consolidate_schema_from_test_cases(
'test', test_cases, args, found_types, return_type)
assert result_arg_types == {'a': 'list<integer>', 'b': 'string'}
assert result_return_type == 'list<boolean>'
def test_consolidate_schema_float_and_double():
test_cases = {
0: {
'inputs': [[1]],
'outputs': [],
'schema': {
'params': [('list<null>', 1),],
'returns': ('map<string;float>', 1),
},
},
1: {
'inputs': [[]],
'outputs': [True],
'schema': {
'params': [('list<double>', 1),],
'returns': ('map<string;double>', 1),
},
},
}
args = ['a']
found_types = {'a': 'list<float>'}
(
result_arg_types,
result_return_type,
) = question_parsing.consolidate_schema_from_test_cases(
'test', test_cases, args, found_types, None)
assert result_arg_types == {'a': 'list<double>'}
assert result_return_type == 'map<string;double>'
def test_get_arguments_default_fail(valid_errors):
solution = 'def test(a, b=1):\n\tpass'
with pytest.raises(valid_errors):
_ = question_parsing.get_arguments_from_solution('Test', solution, 'test')
def test_get_arguments_invalid_annotation():
solution = 'def get_positive(l: list):\n\tpass'
args, arg_types, return_type = question_parsing.get_arguments_from_solution(
'Test', solution, 'get_positive')
assert args == ['l']
assert arg_types == {'l': None}
assert return_type is None
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the assertion parsing."""
import ast
import json
import pytest
from babelcode import utils
from babelcode.dataset_conversion import assertion_parsing
TEST_DATA = json.loads(
utils.FIXTURES_PATH.joinpath('assertion_parsing_testdata.json').read_text())
TEST_NAME_TO_DATA = {
'test_parses_basic': TEST_DATA['BASIC_ASSERTIONS'],
'test_multiple_asserts': TEST_DATA['MULTIPLE_TEST_CASES'],
}
def pytest_generate_tests(metafunc):
"""Generates the tests dynamically from the passed in test data."""
test_name = metafunc.function.__name__
test_cls = metafunc.cls
if test_cls is None:
return
argnames = None
argvalues = None
ids = None
if test_cls.__name__ == 'TestAssertionToSchemaVisitor':
if test_name in ['test_parses_basic', 'test_multiple_asserts']:
argnames = ['input_str', 'expected_output_dict']
argvalues = []
ids = []
to_use = TEST_NAME_TO_DATA[test_name]
for id_name, id_data in to_use.items():
ids.append(id_name)
input_str, output_data = id_data
# Fix the schema types from json
fixed_expected = {}
for k, v in output_data.items():
for i in range(len(v['schema']['params'])):
v['schema']['params'][i] = tuple(v['schema']['params'][i])
v['schema']['returns'] = tuple(v['schema']['returns'])
fixed_expected[int(k)] = v
argvalues.append((input_str, fixed_expected))
if argnames:
metafunc.parametrize(argnames=argnames, argvalues=argvalues, ids=ids)
class TestAssertionToSchemaVisitor:
def test_parses_basic(self, input_str, expected_output_dict):
# Test that it parses a single assertion line correctly.
visitor = assertion_parsing.AssertionToSchemaVisitor('f')
visitor.visit(ast.parse(input_str))
assert set(visitor.test_cases) == {0}
expected = expected_output_dict[0]
assert visitor.test_cases[0] == expected
def test_multiple_asserts(self, input_str, expected_output_dict):
visitor = assertion_parsing.AssertionToSchemaVisitor('f')
visitor.visit(ast.parse(input_str))
assert visitor.test_cases == expected_output_dict
@pytest.mark.parametrize(
'input_str',
[
'assert f(1) != "A"',
'assert a.f(1) == 2',
'import random\nassert a.f(1)',
'assert 1+2==3',
],
)
def test_raises_error(self, input_str):
visitor = assertion_parsing.AssertionToSchemaVisitor('f')
with pytest.raises(assertion_parsing.AssertionToSchemaError):
_ = visitor.visit(ast.parse(input_str))
def test_tuple_no_children(self):
testing_code = [
'assert clear_tuple((1, 5, 3, 6, 8)) == ()',
]
testing_code = '\n'.join(testing_code)
visitor = assertion_parsing.AssertionToSchemaVisitor('clear_tuple')
visitor.visit(ast.parse(testing_code))
assert len(visitor.test_cases) == 1
result_schema = visitor.test_cases[0]['schema']['returns']
assert result_schema == ('tuple<null>', 1)
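  # Empty containers get a <null> element type; schema consolidation across
  # test cases (see the question_parsing tests) is what later narrows null to
  # a concrete type.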
def test_set_single_item(self):
testing_code = ['assert my_dict({})==True']
testing_code = '\n'.join(testing_code)
visitor = assertion_parsing.AssertionToSchemaVisitor('clear_tuple')
visitor.visit(ast.parse(testing_code))
assert len(visitor.test_cases) == 1
result_schema = visitor.test_cases[0]['schema']['params'][0]
assert result_schema == ('set<null>', 1)
class TestLiteralParser:
def setup_method(self, method):
self.visitor = assertion_parsing.LiteralParser()
def _parse_literal(self, code):
self.visitor.visit(ast.parse(code).body[0].value)
@pytest.mark.parametrize('input_str', ['f(x(y))', 'a(x)', 'a(1,x)'])
def test_should_fail(self, input_str):
"""Tests the cases that should fail."""
with pytest.raises(assertion_parsing.LiteralParsingError):
self._parse_literal(input_str)
def test_empty_list_nested(self):
"""Tests that the schema can properly be deduced when there is an empty list.
"""
self._parse_literal('[[],[1],[1,2,3]]')
assert self.visitor.schema_type == 'list<list<integer>>'
assert self.visitor.value == [[], [1], [1, 2, 3]]
def test_consolidate_types(self):
"""Tests that the consolidation works."""
self._parse_literal('[[1.0000000000001,1],[1,1],[1,1.0]]')
assert self.visitor.schema_type == 'list<list<double>>'
assert self.visitor.value == [
[1.0000000000001, 1.0],
[1.0, 1.0],
[1.0, 1.0],
]
def test_detects_long(self):
self._parse_literal('[3027040707]')
assert self.visitor.schema_type == 'list<long>'
assert self.visitor.value == [3027040707]
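  # 3027040707 exceeds the signed 32-bit max (2147483647), so the parser
  # presumably widens integer to long past that bound.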
def test_converts_list_of_int_to_float(self):
"""Regression test for converting list of ints and floats to list[float]."""
self._parse_literal('[1, 3, 2.0, 8.0]')
assert self.visitor.schema_type == 'list<float>'
assert all(isinstance(v, float) for v in self.visitor.value)
assert self.visitor.value == [1.0, 3.0, 2.0, 8.0]
  def test_converts_single_character_to_character(self):
    """Tests that a single-character string parses as a character."""
self._parse_literal('"a"')
assert self.visitor.schema_type == 'character'
assert self.visitor.value == 'a'
def test_consolidate_types_strings(self):
"""Tests that the consolidation works."""
self._parse_literal("['3', '11111111']")
assert self.visitor.schema_type == 'list<string>'
assert self.visitor.value == ['3', '11111111']
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing code prompt translation in each language."""
import pytest
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
from tests import utils as testing_utils
SchemaType = schema_parsing.SchemaType
@pytest.mark.parametrize('lang_name', testing_utils.LANGS_TO_TEST)
class TestLanguagePromptTranslation(testing_utils.BaseLanguageTestingClass):
def _setup_test(self, lang_name):
super()._setup_test(lang_name)
self.prompt_translator = self.lang.make_prompt_translator()
self.prompt_spec = self.lang_spec['prompt_translation']
def test_format_docstring(self, lang_name):
"""Test that formatting the docstring works."""
self._setup_test(lang_name)
input_docstring = 'Test Docstring.\n/**///*/--"""'
expected = self.prompt_spec['docstring']
cleaned_docstring = self.prompt_translator.clean_docstring_for_lang(
input_docstring)
result = self.prompt_translator.format_docstring_for_lang(cleaned_docstring)
assert result == expected
def test_signature_argument(self, lang_name):
"""Tests that the translating an argument in signature works."""
self._setup_test(lang_name)
type_name = self.lang_spec['primitives']['string']['type_name']
expected = self.prompt_spec['signature_argument'].replace(
'TYPE_NAME', type_name)
result = self.prompt_translator.translate_signature_argument_to_lang(
'arg_name', SchemaType('string', type_name), use_type_annotation=True)
assert result == expected
def test_return_type(self, lang_name):
"""Tests that the translating the return type in signature works."""
self._setup_test(lang_name)
type_name = self.lang_spec['primitives']['string']['type_name']
expected = self.prompt_spec['return_type'].replace('TYPE_NAME', type_name)
result = self.prompt_translator.translate_signature_returns_to_lang(
SchemaType('string', type_name), use_type_annotation=True)
assert result == expected
def test_argument_name(self, lang_name):
"""Test that translating argument name to language works."""
self._setup_test(lang_name)
expected = self.prompt_spec['argument_name']
result = self.prompt_translator.translate_argument_name_to_lang('arg_name')
assert result == expected
def test_signature_with_docstring(self, lang_name):
"""Test that translating signature with the docstring works."""
self._setup_test(lang_name)
type_name = self.lang_spec['primitives']['string']['type_name']
schema = {
'arg_name': SchemaType('string', type_name),
data_types.EXPECTED_KEY_NAME: SchemaType('string', type_name),
}
input_order = ['arg_name']
signature = self.prompt_spec['signature_argument'].replace(
'TYPE_NAME', type_name)
return_type = self.prompt_spec['return_type'].replace(
'TYPE_NAME', type_name)
docstring = self.prompt_spec['docstring']
expected = self.prompt_spec['signature_with_docstring']
expected = expected.replace('SIGNATURE', signature)
expected = expected.replace('RETURNS', return_type)
expected = expected.replace('DOCSTRING', docstring.replace('\\', '\\\\'))
result = self.prompt_translator.translate_signature_with_docstring(
'Python',
'Test Docstring.\n/**///*/--"""',
'test',
'Solution',
schema,
input_order,
True,
)
assert result == expected
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing each schema parser in each language."""
import pytest
from babelcode import data_types
from babelcode import schema_parsing
from tests import utils as testing_utils
SchemaType = schema_parsing.SchemaType
@pytest.mark.parametrize('lang_name', testing_utils.LANGS_TO_TEST)
class TestLanguageSchemaParsing(testing_utils.BaseLanguageTestingClass):
"""Testing each schema parser in each language."""
def assert_parse_lang_schema_correct_basic(self, convert_type: str,
expected_schema_type: SchemaType):
"""Assertion that the basic SchemaTypes are correct.
Args:
convert_type: The input type string to convert to a SchemaType.
expected_schema_type: The expected schema type.
"""
input_schema_type = SchemaType.from_generic_type_string(convert_type)
raw_schema = {
'arg0': input_schema_type,
'arg1': input_schema_type,
data_types.EXPECTED_KEY_NAME: input_schema_type,
}
result_schema = schema_parsing.parse_language_schema(
self.schema_spec, raw_schema)
assert result_schema == {
'arg0': expected_schema_type,
'arg1': expected_schema_type,
data_types.EXPECTED_KEY_NAME: expected_schema_type,
}
@pytest.mark.parametrize('convert_type', schema_parsing.PRIMITIVE_TYPES)
def test_parse_lang_schema_primitives(self, lang_name: str,
convert_type: str):
"""Test that the primitives by themselves are parsed correctly."""
self._setup_test(lang_name)
expected_output = self.lang_spec['primitives'][convert_type]['type_name']
expected_schema_type = SchemaType.from_generic_type_string(convert_type)
expected_schema_type.lang_type = expected_output
self.assert_parse_lang_schema_correct_basic(convert_type,
expected_schema_type)
@pytest.mark.parametrize('convert_type', ['string', 'integer', 'boolean'])
@pytest.mark.parametrize('ds_type', testing_utils.DATA_STRUCTURES)
def test_make_lang_schema_data_structures(self, lang_name: str,
convert_type: str, ds_type: str):
"""Test that the data structures with primitive types are correct."""
self._setup_test(lang_name)
io_spec = self.get_primitive_data(convert_type)
target_type = io_spec['type_name']
expected_ds_type_dict = self.lang_spec['data_structures_schema'][ds_type]
def update_expected_lang_types(s_type, lang_type_dict) -> SchemaType:
s_type.lang_type = lang_type_dict['expected'].replace(
'TYPE_NAME_1', target_type)
s_type.lang_type = s_type.lang_type.replace('TYPE_NAME_2', target_type)
if 'elements' in lang_type_dict:
s_type.elements = [
update_expected_lang_types(s_type.elements[i], elem_type)
for i, elem_type in enumerate(lang_type_dict['elements'])
]
if 'key_type' in lang_type_dict:
s_type.key_type = update_expected_lang_types(s_type.key_type,
lang_type_dict['key_type'])
return s_type
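    # update_expected_lang_types recursively substitutes TYPE_NAME_1/2 in the
    # spec's expected lang_type strings, descending into `elements` and
    # `key_type` so nested generics receive the same replacement.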
ds_type = ds_type.replace('TYPE_NAME_1', convert_type)
ds_type = ds_type.replace('TYPE_NAME_2', convert_type)
expected_schema_type = SchemaType.from_generic_type_string(ds_type)
expected_schema_type = update_expected_lang_types(expected_schema_type,
expected_ds_type_dict)
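    # R is special-cased here: its map representation is assumed to reject
    # integer keys, so schema parsing should raise SchemaTypeError rather than
    # produce a language schema.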
if 'map' in ds_type and convert_type == 'integer' and lang_name == 'R':
with pytest.raises(schema_parsing.SchemaTypeError):
input_schema_type = SchemaType.from_generic_type_string(
ds_type.replace('TYPE_NAME_1', convert_type))
raw_schema = {
'arg0': input_schema_type,
'arg1': input_schema_type,
data_types.EXPECTED_KEY_NAME: input_schema_type,
}
schema_parsing.parse_language_schema(self.schema_spec, raw_schema)
else:
self.assert_parse_lang_schema_correct_basic(
convert_type=ds_type.replace('TYPE_NAME_1', convert_type),
expected_schema_type=expected_schema_type,
)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing code generation in each language."""
import pytest
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
from babelcode import data_types
from babelcode import schema_parsing
from tests import utils as testing_utils
SchemaType = schema_parsing.SchemaType
@pytest.fixture()
def sample_schema():
"""Sample schema fixture."""
yield {
'arg0':
SchemaType.from_generic_type_string('list<list<string>>'),
'arg1':
SchemaType.from_generic_type_string('boolean'),
data_types.EXPECTED_KEY_NAME:
SchemaType.from_generic_type_string('integer'),
}
@pytest.mark.parametrize('lang_name', testing_utils.LANGS_TO_TEST)
class TestLanguageGeneration(testing_utils.BaseLanguageTestingClass):
"""Test that the code generation in each language is correct."""
def _setup_conversion_test(self, lang_name, convert_type):
"""Helper function to do common actions to setup a conversion test."""
self._setup_test(lang_name)
self.type_spec = self.get_primitive_data(convert_type)
def assert_convert_method_correct(self, convert_type, input_val, output_val):
"""Assert that a conversion method is correct."""
underlying_type = SchemaType(convert_type)
convert_fn = self.literal_translator.convert_primitive_fn
assert convert_fn(underlying_type, input_val) == output_val
@pytest.mark.parametrize('convert_type', schema_parsing.PRIMITIVE_TYPES)
def test_convert_primitive(self, lang_name, convert_type):
"""Test converting each primitive type."""
self._setup_conversion_test(lang_name, convert_type)
self.assert_convert_method_correct(convert_type, self.type_spec['input'],
self.type_spec['output'])
@pytest.mark.parametrize('convert_type', schema_parsing.PRIMITIVE_WITH_NULL)
def test_convert_primitive_null_value(self, lang_name, convert_type):
"""Test that converting the primitives that can be null are correct."""
self._setup_conversion_test(lang_name, convert_type)
self.assert_convert_method_correct(convert_type, None,
self.type_spec['null_output'])
@pytest.mark.parametrize('type_test', ['list', 'set'])
@pytest.mark.parametrize('convert_type', schema_parsing.PRIMITIVE_TYPES)
def test_convert_array_style_type(self, lang_name, type_test, convert_type):
"""Test that converting list is correct for each primitive."""
self._setup_conversion_test(lang_name, convert_type)
input_val = self.type_spec['input']
template_dict = self.lang_spec['data_structures_literals']
if type_test == 'list':
arg_type = SchemaType.from_generic_type_string(
f'list<list<{convert_type}>>')
input_value = [[input_val, input_val], [input_val]]
expected = template_dict['nested_list']
else:
arg_type = SchemaType.from_generic_type_string(f'set<{convert_type}>')
input_value = [input_val]
expected = template_dict['set']
schema = self.parse_lang_schema({'arg0': arg_type})
result = self.literal_translator.convert_array_like_type(
schema['arg0'], input_value, type_test == 'set')
expected = expected.replace('TYPE_VAL_1', self.type_spec['output'])
type_name_to_replace = schema['arg0'].lang_type
if lang_name in ['CSharp', 'Typescript']:
type_name_to_replace = schema['arg0'].elements[0].lang_type
expected = expected.replace('TYPE_NAME_1', type_name_to_replace)
assert result == expected
@pytest.mark.parametrize('convert_type', schema_parsing.PRIMITIVE_TYPES)
def test_convert_map(self, lang_name, convert_type):
"""Test that converting map is correct for each primitive."""
self._setup_conversion_test(lang_name, convert_type)
input_val = self.type_spec['input']
schema = self.parse_lang_schema({
'arg0':
SchemaType.from_generic_type_string(
f'map<string;list<{convert_type}>>')
})
result = self.literal_translator.convert_map(
schema['arg0'], {'key_value': [input_val, input_val]})
map_template = self.lang_spec['data_structures_literals']['nested_map']
expected = map_template.replace('TYPE_VAL_1', self.type_spec['output'])
key_value = self.literal_translator.convert_primitive_fn(
SchemaType(type_str='string'), 'key_value')
expected = expected.replace('KEY_VAL_1', key_value)
expected = expected.replace(
'KEY_TYPE_1', self.lang_spec['primitives']['string']['type_name'])
type_name_to_replace = schema['arg0'].lang_type
if lang_name in ['CSharp', 'Go', 'Typescript']:
type_name_to_replace = schema['arg0'].elements[0].lang_type
expected = expected.replace('TYPE_NAME_1', type_name_to_replace)
assert result == expected
def test_string_correctly_escaped(self, lang_name):
"""Tests that language specific escaping works."""
self._setup_test(lang_name)
assert self.literal_translator
|