# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
#! /usr/bin/env python
#
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from setuptools import find_packages
from arxiv_latex_cleaner._version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = []
with open("requirements.txt") as f:
for l in f.readlines():
l_c = l.strip()
if l_c and not l_c.startswith('#'):
install_requires.append(l_c)
setup(
name="arxiv_latex_cleaner",
version=__version__,
packages=find_packages(exclude=["*.tests"]),
python_requires='>=3',
url="https://github.com/google-research/arxiv-latex-cleaner",
license="Apache License, Version 2.0",
author="Google Research Authors",
author_email="[email protected]",
description="Cleans the LaTeX code of your paper to submit to arXiv.",
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={
"console_scripts": ["arxiv_latex_cleaner=arxiv_latex_cleaner.__main__:__main__"]
},
install_requires=install_requires,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Science/Research",
],
)
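# Illustrative usage once installed (a sketch; the console-script name comes
# from the entry_points above):
#   pip install .
#   arxiv_latex_cleaner path/to/paper_folder
# or, without installing the entry point:
#   python -m arxiv_latex_cleaner path/to/paper_folder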
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "v1.0.1"
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleans the LaTeX code of your paper to submit to arXiv."""
import collections
import contextlib
import copy
import os
import tempfile
import regex
import shutil
import subprocess
import logging
from PIL import Image
PDF_RESIZE_COMMAND = (
'gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dNOPAUSE -dQUIET -dBATCH '
'-dDownsampleColorImages=true -dColorImageResolution={resolution} '
'-dColorImageDownsampleThreshold=1.0 -dAutoRotatePages=/None '
'-sOutputFile={output} {input}')
MAX_FILENAME_LENGTH = 120
# Fix for Windows: even though '\' (os.sep) is the standard path separator on
# Windows, it interferes with regular expressions. We change os.sep to '/' and
# patch os.path.join to use '/' as well, since Windows handles '/' paths
# correctly.
if os.name == 'nt':
global old_os_path_join
def new_os_join(path, *args):
res = old_os_path_join(path, *args)
res = res.replace('\\', '/')
return res
old_os_path_join = os.path.join
os.sep = '/'
os.path.join = new_os_join
def _create_dir_erase_if_exists(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def _create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def _keep_pattern(haystack, patterns_to_keep):
"""Keeps the strings that match 'patterns_to_keep'."""
out = []
for item in haystack:
if any((regex.findall(rem, item) for rem in patterns_to_keep)):
out.append(item)
return out
def _remove_pattern(haystack, patterns_to_remove):
"""Removes the strings that match 'patterns_to_remove'."""
return [
item for item in haystack
if item not in _keep_pattern([item], patterns_to_remove)
]
def _list_all_files(in_folder, ignore_dirs=None):
if ignore_dirs is None:
ignore_dirs = []
to_consider = [
os.path.join(os.path.relpath(path, in_folder), name)
if path != in_folder else name
for path, _, files in os.walk(in_folder)
for name in files
]
return _remove_pattern(to_consider, ignore_dirs)
def _copy_file(filename, params):
_create_dir_if_not_exists(
os.path.join(params['output_folder'], os.path.dirname(filename)))
shutil.copy(
os.path.join(params['input_folder'], filename),
os.path.join(params['output_folder'], filename))
def _remove_command(text, command, keep_text=False):
"""Removes '\\command{*}' from the string 'text'.
Regex `base_pattern` used to match balanced parentheses taken from:
https://stackoverflow.com/questions/546433/regular-expression-to-match-balanced-parentheses/35271017#35271017
"""
base_pattern = r'\\' + command + r'\{((?:[^{}]+|\{(?1)\})*)\}'
# Loops in case of nested commands that need to retain text, e.g.,
# \red{hello \red{world}}.
while True:
all_substitutions = []
has_match = False
for match in regex.finditer(base_pattern, text):
      # In case there is only whitespace (or nothing) up to the following
      # newline, insert a percent sign so as not to alter the newlines.
has_match = True
new_substring = '' if not keep_text else text[match.span()[0] +
len(command) +
2:match.span()[1] - 1]
if match.span()[1] < len(text):
next_newline = text[match.span()[1]:].find('\n')
if next_newline != -1:
text_until_newline = text[match.span()[1]:match.span()[1] +
next_newline]
if (not text_until_newline or
text_until_newline.isspace()) and not keep_text:
new_substring = '%'
all_substitutions.append(
(match.span()[0], match.span()[1], new_substring))
for (start, end, new_substring) in reversed(all_substitutions):
text = text[:start] + new_substring + text[end:]
if not keep_text or not has_match:
break
return text
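# Illustrative behavior of _remove_command above, mirroring the unit tests
# shipped with this tool (a sketch, not part of the original source):
#   _remove_command('A\n\\todo{B\nC}\nD', 'todo', keep_text=False)
#     -> 'A\n%\nD'      (a '%' is left so newlines are not altered)
#   _remove_command('A\n\\todo{B\n\\todo{C}}\nD', 'todo', keep_text=True)
#     -> 'A\nB\nC\nD'   (nested commands are unwrapped, text is kept)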
def _remove_environment(text, environment):
"""Removes '\\begin{environment}*\\end{environment}' from 'text'."""
# Need to escape '{', to not trigger fuzzy matching if `environment` starts
# with one of 'i', 'd', 's', or 'e'
return regex.sub(
r'\\begin\{' + environment + r'}[\s\S]*?\\end\{' + environment + r'}', '',
text)
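# Illustrative behavior of _remove_environment above (mirroring the unit
# tests): the whole \begin{...}...\end{...} span is dropped.
#   _remove_environment('Foo\\begin{comment}\n3x+2\n\\end{comment}\nFoo',
#                       'comment')
#     -> 'Foo\nFoo'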
def _remove_iffalse_block(text):
"""Removes possibly nested r'\iffalse*\fi' blocks from 'text'."""
p = regex.compile(r'\\if\s*(\w+)|\\fi(?!\w)')
level = -1
positions_to_delete = []
start, end = 0, 0
for m in p.finditer(text):
if (m.group().replace(' ', '') == r'\iffalse' or
m.group().replace(' ', '') == r'\if0') and level == -1:
level += 1
start = m.start()
elif m.group().startswith(r'\if') and level >= 0:
level += 1
elif m.group() == r'\fi' and level >= 0:
if level == 0:
end = m.end()
positions_to_delete.append((start, end))
level -= 1
else:
pass
for (start, end) in reversed(positions_to_delete):
if end < len(text) and text[end].isspace():
end_to_del = end + 1
else:
end_to_del = end
text = text[:start] + text[end_to_del:]
return text
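# Illustrative behavior of _remove_iffalse_block above (mirroring the unit
# tests): only \iffalse/\if0 blocks are dropped; nested \if...\fi pairs
# inside them are tracked via the level counter.
#   _remove_iffalse_block('\\iffalse\nFoo\n\\fi')
#     -> ''
#   _remove_iffalse_block('\\ifvar\n\\iffalse\n\\ifvar\nFoo\n\\fi\n\\fi\n\\fi\n')
#     -> '\\ifvar\n\\fi\n'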
def _remove_comments_inline(text):
"""Removes the comments from the string 'text'."""
if 'auto-ignore' in text:
return text
if text.lstrip(' ').lstrip('\t').startswith('%'):
return ''
match = regex.search(r'(?<!\\)%', text)
if match:
return text[:match.end()] + '\n'
else:
return text
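# Illustrative behavior of _remove_comments_inline above (mirroring the unit
# tests): escaped '\%' is untouched, and '%auto-ignore' lines are preserved.
#   _remove_comments_inline(' % Comment\n')   -> ''
#   _remove_comments_inline('Foo %Comment\n') -> 'Foo %\n'
#   _remove_comments_inline('%auto-ignore\n') -> '%auto-ignore\n'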
def _strip_tex_contents(lines, end_str):
"""Removes everything after end_str."""
for i in range(len(lines)):
if end_str in lines[i]:
if '%' not in lines[i]:
return lines[:i + 1]
elif lines[i].index('%') > lines[i].index(end_str):
return lines[:i + 1]
return lines
def _read_file_content(filename):
with open(filename, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
lines = _strip_tex_contents(lines, '\\end{document}')
return lines
def _read_all_tex_contents(tex_files, parameters):
contents = {}
for fn in tex_files:
contents[fn] = _read_file_content(
os.path.join(parameters['input_folder'], fn))
return contents
def _write_file_content(content, filename):
_create_dir_if_not_exists(os.path.dirname(filename))
with open(filename, 'w', encoding='utf-8') as fp:
return fp.write(content)
def _remove_comments_and_commands_to_delete(content, parameters):
"""Erases all LaTeX comments in the content, and writes it."""
content = [_remove_comments_inline(line) for line in content]
content = _remove_environment(''.join(content), 'comment')
content = _remove_iffalse_block(content)
for environment in parameters.get('environments_to_delete', []):
content = _remove_environment(content, environment)
for command in parameters.get('commands_only_to_delete', []):
content = _remove_command(content, command, True)
for command in parameters['commands_to_delete']:
content = _remove_command(content, command, False)
return content
def _replace_tikzpictures(content, figures):
  r"""Replaces tikzpicture environments with \includegraphics commands
  pointing to externally compiled PDF figures, and returns the content.
  """
def get_figure(matchobj):
found_tikz_filename = regex.search(r'\\tikzsetnextfilename{(.*?)}',
matchobj.group(0)).group(1)
# search in tex split if figure is available
matching_tikz_filenames = _keep_pattern(
figures, ['/' + found_tikz_filename + '.pdf'])
if len(matching_tikz_filenames) == 1:
return '\\includegraphics{' + matching_tikz_filenames[0] + '}'
else:
return matchobj.group(0)
content = regex.sub(r'\\tikzsetnextfilename{[\s\S]*?\\end{tikzpicture}',
get_figure, content)
return content
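# Illustrative behavior of _replace_tikzpictures above (mirroring the unit
# tests), assuming 'ext_tikz/test2.pdf' is among the externalized figures:
#   _replace_tikzpictures(
#       'Foo\\tikzsetnextfilename{test2}\n\\begin{tikzpicture}\n...'
#       '\\end{tikzpicture}\nFoo',
#       ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'])
#     -> 'Foo\\includegraphics{ext_tikz/test2.pdf}\nFoo'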
def _replace_includesvg(content, svg_inkscape_files):
def repl_svg(matchobj):
svg_path = matchobj.group(2)
svg_filename = os.path.basename(svg_path)
# search in svg_inkscape split if pdf_tex file is available
matching_pdf_tex_files = _keep_pattern(
svg_inkscape_files, ['/' + svg_filename + '-tex.pdf_tex'])
if len(matching_pdf_tex_files) == 1:
options = '' if matchobj.group(1) is None else matchobj.group(1)
return f'\\includeinkscape{options}{{{matching_pdf_tex_files[0]}}}'
else:
return matchobj.group(0)
content = regex.sub(r'\\includesvg(\[.*?\])?{(.*?)}', repl_svg, content)
return content
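# Illustrative behavior of _replace_includesvg above (mirroring the unit
# tests): optional [...] arguments are carried over to \includeinkscape.
#   _replace_includesvg(
#       'Foo\\includesvg[width=\\linewidth]{test2}\nFoo',
#       ['ext_svg/test1-tex.pdf_tex', 'ext_svg/test2-tex.pdf_tex'])
#     -> 'Foo\\includeinkscape[width=\\linewidth]{ext_svg/test2-tex.pdf_tex}\nFoo'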
def _resize_and_copy_figure(filename, origin_folder, destination_folder,
resize_image, image_size, compress_pdf,
pdf_resolution):
"""Resizes and copies the input figure (either JPG, PNG, or PDF)."""
_create_dir_if_not_exists(
os.path.join(destination_folder, os.path.dirname(filename)))
if resize_image and os.path.splitext(filename)[1].lower() in [
'.jpg', '.jpeg', '.png'
]:
im = Image.open(os.path.join(origin_folder, filename))
if max(im.size) > image_size:
im = im.resize(
tuple([int(x * float(image_size) / max(im.size)) for x in im.size]),
Image.Resampling.LANCZOS)
if os.path.splitext(filename)[1].lower() in ['.jpg', '.jpeg']:
im.save(os.path.join(destination_folder, filename), 'JPEG', quality=90)
elif os.path.splitext(filename)[1].lower() in ['.png']:
im.save(os.path.join(destination_folder, filename), 'PNG')
elif compress_pdf and os.path.splitext(filename)[1].lower() == '.pdf':
_resize_pdf_figure(filename, origin_folder, destination_folder,
pdf_resolution)
else:
shutil.copy(
os.path.join(origin_folder, filename),
os.path.join(destination_folder, filename))
def _resize_pdf_figure(filename,
origin_folder,
destination_folder,
resolution,
timeout=10):
input_file = os.path.join(origin_folder, filename)
output_file = os.path.join(destination_folder, filename)
bash_command = PDF_RESIZE_COMMAND.format(
input=input_file, output=output_file, resolution=resolution)
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
outs, errs = process.communicate()
print('Output: ', outs)
print('Errors: ', errs)
def _copy_only_referenced_non_tex_not_in_root(parameters, contents, splits):
for fn in _keep_only_referenced(
splits['non_tex_not_in_root'], contents, strict=True):
_copy_file(fn, parameters)
def _resize_and_copy_figures_if_referenced(parameters, contents, splits):
image_size = collections.defaultdict(lambda: parameters['im_size'])
image_size.update(parameters['images_allowlist'])
pdf_resolution = collections.defaultdict(
lambda: parameters['pdf_im_resolution'])
pdf_resolution.update(parameters['images_allowlist'])
for image_file in _keep_only_referenced(
splits['figures'], contents, strict=False):
_resize_and_copy_figure(
filename=image_file,
origin_folder=parameters['input_folder'],
destination_folder=parameters['output_folder'],
resize_image=parameters['resize_images'],
image_size=image_size[image_file],
compress_pdf=parameters['compress_pdf'],
pdf_resolution=pdf_resolution[image_file])
def _search_reference(filename, contents, strict=False):
"""Returns a match object if filename is referenced in contents, and None otherwise.
If not strict mode, path prefix and extension are optional.
"""
if strict:
# regex pattern for strict=True for path/to/img.ext:
# \{[\s%]*path/to/img\.ext[\s%]*\}
filename_regex = filename.replace('.', r'\.')
else:
basename = os.path.basename(filename)
# make extension optional
root, extension = os.path.splitext(basename)
unescaped_basename_regex = '{}({})?'.format(root, extension)
basename_regex = unescaped_basename_regex.replace('.', r'\.')
# since os.path.split only splits into two parts
# need to iterate and collect all the fragments
fragments = []
cur_head = os.path.dirname(filename)
while cur_head:
cur_head, tail = os.path.split(cur_head)
fragments.insert(0, tail) # insert at the beginning
path_prefix_regex = ''
for fragment in fragments:
path_prefix_regex = '({}{}{})?'.format(path_prefix_regex, fragment,
os.sep)
    # Regex pattern for strict=False for path/to/img.ext:
    # \{[\s%]*(<path_prefix>)?<basename>(<ext>)?[\s%]*\}
filename_regex = path_prefix_regex + basename_regex
# Some files 'path/to/file' are referenced in tex as './path/to/file' thus
# adds prefix for relative paths starting with './' or '.\' to regex search.
filename_regex = r'(.' + os.sep + r')?' + filename_regex
# Pads with braces and optional whitespace/comment characters.
patn = r'\{{[\s%]*{}[\s%]*\}}'.format(filename_regex)
# Picture references in LaTeX are allowed to be in different cases.
return regex.search(patn, contents, regex.IGNORECASE)
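# Illustrative behavior of _search_reference above (mirroring the unit tests)
# for filename 'path/to/img.ext':
#   strict=False matches '{img.ext}', '{to/img}', and '{path/to/img.ext}',
#     but not '{from/img.ext}' or '{long/path/to/img.ext}'.
#   strict=True matches only '{path/to/img.ext}' (whitespace and '%' padding
#     inside the braces are allowed in both modes).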
def _keep_only_referenced(filenames, contents, strict=False):
"""Returns the filenames referenced from contents.
If not strict mode, path prefix and extension are optional.
"""
return [
fn for fn in filenames
if _search_reference(fn, contents, strict) is not None
]
def _keep_only_referenced_tex(contents, splits):
"""Returns the filenames referenced from the tex files themselves.
It needs various iterations in case one file is referenced from an
unreferenced file.
"""
old_referenced = set(splits['tex_in_root'] + splits['tex_not_in_root'])
while True:
referenced = set(splits['tex_in_root'])
for fn in old_referenced:
for fn2 in old_referenced:
if regex.search(r'(' + os.path.splitext(fn)[0] + r'[.}])',
'\n'.join(contents[fn2])):
referenced.add(fn)
if referenced == old_referenced:
splits['tex_to_copy'] = list(referenced)
return
old_referenced = referenced.copy()
def _add_root_tex_files(splits):
# TODO: Check auto-ignore marker in root to detect the main file. Then check
# there is only one non-referenced TeX in root.
# Forces the TeX in root to be copied, even if they are not referenced.
for fn in splits['tex_in_root']:
if fn not in splits['tex_to_copy']:
splits['tex_to_copy'].append(fn)
def _split_all_files(parameters):
"""Splits the files into types or location to know what to do with them."""
file_splits = {
'all':
_list_all_files(
parameters['input_folder'], ignore_dirs=['.git' + os.sep]),
'in_root': [
f for f in os.listdir(parameters['input_folder'])
if os.path.isfile(os.path.join(parameters['input_folder'], f))
]
}
file_splits['not_in_root'] = [
f for f in file_splits['all'] if f not in file_splits['in_root']
]
file_splits['to_copy_in_root'] = _remove_pattern(
file_splits['in_root'],
parameters['to_delete'] + parameters['figures_to_copy_if_referenced'])
file_splits['to_copy_not_in_root'] = _remove_pattern(
file_splits['not_in_root'],
parameters['to_delete'] + parameters['figures_to_copy_if_referenced'])
file_splits['figures'] = _keep_pattern(
file_splits['all'], parameters['figures_to_copy_if_referenced'])
file_splits['tex_in_root'] = _keep_pattern(file_splits['to_copy_in_root'],
['.tex$', '.tikz$'])
file_splits['tex_not_in_root'] = _keep_pattern(
file_splits['to_copy_not_in_root'], ['.tex$', '.tikz$'])
file_splits['non_tex_in_root'] = _remove_pattern(
file_splits['to_copy_in_root'], ['.tex$', '.tikz$'])
file_splits['non_tex_not_in_root'] = _remove_pattern(
file_splits['to_copy_not_in_root'], ['.tex$', '.tikz$'])
if parameters.get('use_external_tikz', None) is not None:
file_splits['external_tikz_figures'] = _keep_pattern(
file_splits['all'], [parameters['use_external_tikz']])
else:
file_splits['external_tikz_figures'] = []
if parameters.get('svg_inkscape', None) is not None:
file_splits['svg_inkscape'] = _keep_pattern(
file_splits['all'], [parameters['svg_inkscape']])
else:
file_splits['svg_inkscape'] = []
return file_splits
def _create_out_folder(input_folder):
"""Creates the output folder, erasing it if existed."""
out_folder = os.path.abspath(input_folder).removesuffix(".zip") + '_arXiv'
_create_dir_erase_if_exists(out_folder)
return out_folder
def run_arxiv_cleaner(parameters):
"""Core of the code, runs the actual arXiv cleaner."""
files_to_delete = [
r'\.aux$', r'\.sh$', r'\.blg$', r'\.brf$', r'\.log$', r'\.out$', r'\.ps$',
r'\.dvi$', r'\.synctex.gz$', '~$', r'\.backup$', r'\.gitignore$',
r'\.DS_Store$', r'\.svg$', r'^\.idea', r'\.dpth$', r'\.md5$', r'\.dep$',
r'\.auxlock$', r'\.fls$', r'\.fdb_latexmk$'
]
if not parameters['keep_bib']:
files_to_delete.append(r'\.bib$')
parameters.update({
'to_delete':
files_to_delete,
'figures_to_copy_if_referenced': [
r'\.png$', r'\.jpg$', r'\.jpeg$', r'\.pdf$'
]
})
logging.info('Collecting file structure.')
parameters['output_folder'] = _create_out_folder(parameters['input_folder'])
from_zip = parameters['input_folder'].endswith('.zip')
  tempdir_context = (
      tempfile.TemporaryDirectory() if from_zip else contextlib.nullcontext())
with tempdir_context as tempdir:
if from_zip:
logging.info('Unzipping input folder.')
shutil.unpack_archive(parameters['input_folder'], tempdir)
parameters['input_folder'] = tempdir
splits = _split_all_files(parameters)
logging.info('Reading all tex files')
tex_contents = _read_all_tex_contents(
splits['tex_in_root'] + splits['tex_not_in_root'], parameters)
for tex_file in tex_contents:
logging.info('Removing comments in file %s.', tex_file)
tex_contents[tex_file] = _remove_comments_and_commands_to_delete(
tex_contents[tex_file], parameters)
for tex_file in tex_contents:
logging.info('Replacing \\includesvg calls in file %s.', tex_file)
tex_contents[tex_file] = _replace_includesvg(tex_contents[tex_file],
splits['svg_inkscape'])
for tex_file in tex_contents:
logging.info('Replacing Tikz Pictures in file %s.', tex_file)
content = _replace_tikzpictures(tex_contents[tex_file],
splits['external_tikz_figures'])
      # Splitting on '\n' leaves a trailing empty string when the file already
      # ends with '\n', so the later '\n'.join restores the original endings.
tex_contents[tex_file] = content.split('\n')
_keep_only_referenced_tex(tex_contents, splits)
_add_root_tex_files(splits)
for tex_file in splits['tex_to_copy']:
logging.info('Replacing patterns in file %s.', tex_file)
content = '\n'.join(tex_contents[tex_file])
content = _find_and_replace_patterns(
content, parameters.get('patterns_and_insertions', list()))
tex_contents[tex_file] = content
new_path = os.path.join(parameters['output_folder'], tex_file)
logging.info('Writing modified contents to %s.', new_path)
_write_file_content(
content,
new_path,
)
full_content = '\n'.join(
''.join(tex_contents[fn]) for fn in splits['tex_to_copy'])
_copy_only_referenced_non_tex_not_in_root(parameters, full_content, splits)
for non_tex_file in splits['non_tex_in_root']:
logging.info('Copying non-tex file %s.', non_tex_file)
_copy_file(non_tex_file, parameters)
_resize_and_copy_figures_if_referenced(parameters, full_content, splits)
logging.info('Outputs written to %s', parameters['output_folder'])
def strip_whitespace(text):
"""Strips all whitespace characters.
https://stackoverflow.com/questions/8270092/remove-all-whitespace-in-a-string
"""
pattern = regex.compile(r'\s+')
text = regex.sub(pattern, '', text)
return text
def merge_args_into_config(args, config_params):
final_args = copy.deepcopy(config_params)
config_keys = config_params.keys()
for key, value in args.items():
if key in config_keys:
if any([isinstance(value, t) for t in [str, bool, float, int]]):
# Overwrites config value with args value.
final_args[key] = value
elif isinstance(value, list):
# Appends args values to config values.
final_args[key] = value + config_params[key]
elif isinstance(value, dict):
# Updates config params with args params.
final_args[key].update(**value)
else:
final_args[key] = value
return final_args
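# Illustrative behavior of merge_args_into_config above (mirroring the unit
# tests): scalars from args overwrite, lists are prepended, dicts are merged.
#   merge_args_into_config(
#       {'im_size': 500, 'commands_to_delete': ['a'],
#        'images_allowlist': {'p1': 1000}},
#       {'im_size': 1000, 'commands_to_delete': ['b'],
#        'images_allowlist': {'p2': 1000}})
#     -> {'im_size': 500, 'commands_to_delete': ['a', 'b'],
#         'images_allowlist': {'p1': 1000, 'p2': 1000}}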
def _find_and_replace_patterns(content, patterns_and_insertions):
r"""
content: str
patterns_and_insertions: List[Dict]
Example for patterns_and_insertions:
[
{
"pattern" :
r"(?:\\figcompfigures{\s*)(?P<first>.*?)\s*}\s*{\s*(?P<second>.*?)\s*}\s*{\s*(?P<third>.*?)\s*}",
"insertion" :
r"\parbox[c]{{{second}\linewidth}}{{\includegraphics[width={third}\linewidth]{{figures/{first}}}}}}",
"description": "Replace figcompfigures"
},
]
"""
for pattern_and_insertion in patterns_and_insertions:
pattern = pattern_and_insertion['pattern']
insertion = pattern_and_insertion['insertion']
description = pattern_and_insertion['description']
logging.info('Processing pattern: %s.', description)
p = regex.compile(pattern)
m = p.search(content)
while m is not None:
local_insertion = insertion.format(**m.groupdict())
if pattern_and_insertion.get('strip_whitespace', True):
local_insertion = strip_whitespace(local_insertion)
logging.info(f'Found {content[m.start():m.end()]:<70}')
logging.info(f'Replacing with {local_insertion:<30}')
content = content[:m.start()] + local_insertion + content[m.end():]
m = p.search(content)
logging.info('Finished pattern: %s.', description)
return content
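# Illustrative behavior of _find_and_replace_patterns above (mirroring the
# unit tests), using the figcompfigures pattern from the docstring with the
# default strip_whitespace=True (whitespace is stripped from the insertion):
#   '& \figcompfigures{image1.jpg}{\ww}{1.0}'
#     -> '& \parbox[c]{\ww\linewidth}{\includegraphics[width=1.0\linewidth]{figures/image1.jpg}}'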
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for ``arxiv_latex_cleaner``.
.. code-block:: bash
$ python -m arxiv_latex_cleaner --help
"""
import argparse
import json
import logging
from ._version import __version__
from .arxiv_latex_cleaner import merge_args_into_config
from .arxiv_latex_cleaner import run_arxiv_cleaner
import yaml
PARSER = argparse.ArgumentParser(
prog="arxiv_latex_cleaner@{0}".format(__version__),
description=("Clean the LaTeX code of your paper to submit to arXiv. "
"Check the README for more information on the use."),
)
PARSER.add_argument(
"input_folder", type=str, help="Input folder or zip archive containing the LaTeX code.")
PARSER.add_argument(
"--resize_images",
action="store_true",
help="Resize images.",
)
PARSER.add_argument(
"--im_size",
default=500,
type=int,
help=("Size of the output images (in pixels, longest side). Fine tune this "
"to get as close to 10MB as possible."),
)
PARSER.add_argument(
"--compress_pdf",
action="store_true",
help="Compress PDF images using ghostscript (Linux and Mac only).",
)
PARSER.add_argument(
"--pdf_im_resolution",
default=500,
type=int,
help="Resolution (in dpi) to which the tool resamples the PDF images.",
)
PARSER.add_argument(
"--images_allowlist",
default={},
type=json.loads,
help=("Images (and PDFs) that won't be resized to the default resolution,"
"but the one provided here. Value is pixel for images, and dpi for"
"PDFs, as in --im_size and --pdf_im_resolution, respectively. Format "
"is a dictionary as: '{\"path/to/im.jpg\": 1000}'"),
)
PARSER.add_argument(
"--keep_bib",
action="store_true",
help="Avoid deleting the *.bib files.",
)
PARSER.add_argument(
"--commands_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX commands that will be deleted. Useful for e.g. user-defined "
"\\todo commands. For example, to delete all occurrences of \\todo1{} "
"and \\todo2{}, run the tool with `--commands_to_delete todo1 todo2`."
"Please note that the positional argument `input_folder` cannot come "
"immediately after `commands_to_delete`, as the parser does not have "
"any way to know if it's another command to delete."),
)
PARSER.add_argument(
"--commands_only_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX commands that will be deleted but the text wrapped in the "
"commands will be retained. Useful for commands that change text "
"formats and colors, which you may want to remove but keep the "
"text within. Usages are exactly the same as commands_to_delete. "
"Note that if the commands listed here duplicate that after "
"commands_to_delete, the default action will be retaining the wrapped text."),
)
PARSER.add_argument(
"--environments_to_delete",
nargs="+",
default=[],
required=False,
help=(
"LaTeX environments that will be deleted. Useful for e.g. user-"
"defined comment environments. For example, to delete all occurrences "
"of \\begin{note} ... \\end{note}, run the tool with "
"`--environments_to_delete note`. Please note that the positional "
"argument `input_folder` cannot come immediately after "
"`environments_to_delete`, as the parser does not have any way to "
"know if it's another environment to delete."),
)
PARSER.add_argument(
"--use_external_tikz",
type=str,
help=("Folder (relative to input folder) containing externalized tikz "
"figures in PDF format."))
PARSER.add_argument(
"--svg_inkscape",
nargs="?",
type=str,
const="svg-inkscape",
help=(
"Include PDF files generated by Inkscape via the `\\includesvg` "
"command from the `svg` package. This is done by replacing the "
"`\\includesvg` calls with `\\includeinkscape` calls pointing to the "
"generated `.pdf_tex` files. By default, these files and the "
"generated PDFs are located under `./svg-inkscape` (relative to the "
"input folder), but a different path (relative to the input folder) "
"can be provided in case a different `inkscapepath` was set when "
"loading the `svg` package."
)
)
PARSER.add_argument(
"--config",
type=str,
help=("Read settings from `.yaml` config file. If command line arguments "
"are provided additionally, the config file parameters are updated "
"with the command line parameters."),
required=False,
)
PARSER.add_argument(
"--verbose",
action="store_true",
help="Enable detailed output.",
)
ARGS = vars(PARSER.parse_args())
if ARGS["config"] is not None:
try:
with open(ARGS["config"], "r") as config_file:
config_params = yaml.safe_load(config_file)
final_args = merge_args_into_config(ARGS, config_params)
except FileNotFoundError:
print(f"config file {ARGS.config} not found.")
final_args = ARGS
final_args.pop("config", None)
else:
final_args = ARGS
if final_args.get("verbose", False):
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
run_arxiv_cleaner(final_args)
exit(0)
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
import shutil
import unittest
from absl.testing import parameterized
from arxiv_latex_cleaner import arxiv_latex_cleaner
from PIL import Image
def make_args(
input_folder='foo/bar',
resize_images=False,
im_size=500,
compress_pdf=False,
pdf_im_resolution=500,
images_allowlist=None,
commands_to_delete=None,
use_external_tikz='foo/bar/tikz',
):
if images_allowlist is None:
images_allowlist = {}
if commands_to_delete is None:
commands_to_delete = []
args = {
'input_folder': input_folder,
'resize_images': resize_images,
'im_size': im_size,
'compress_pdf': compress_pdf,
'pdf_im_resolution': pdf_im_resolution,
'images_allowlist': images_allowlist,
'commands_to_delete': commands_to_delete,
'use_external_tikz': use_external_tikz,
}
return args
def make_contents():
return (r'& \figcompfigures{'
'\n\timage1.jpg'
'\n}{'
'\n\t'
r'\ww'
'\n}{'
'\n\t1.0'
'\n\t}'
'\n& '
r'\figcompfigures{image2.jpg}{\ww}{1.0}')
def make_patterns():
pattern = r'(?:\\figcompfigures{\s*)(?P<first>.*?)\s*}\s*{\s*(?P<second>.*?)\s*}\s*{\s*(?P<third>.*?)\s*}'
insertion = r"""\parbox[c]{{
{second}\linewidth
}}{{
\includegraphics[
width={third}\linewidth
]{{
figures/{first}
}}
}} """
description = 'Replace figcompfigures'
output = {
'pattern': pattern,
'insertion': insertion,
'description': description
}
return [output]
def make_search_reference_tests():
return ({
'testcase_name': 'prefix1',
'filenames': ['include_image_yes.png', 'include_image.png'],
'contents': '\\include{include_image_yes.png}',
'strict': False,
'true_outputs': ['include_image_yes.png']
}, {
'testcase_name': 'prefix2',
'filenames': ['include_image_yes.png', 'include_image.png'],
'contents': '\\include{include_image.png}',
'strict': False,
'true_outputs': ['include_image.png']
}, {
'testcase_name': 'nested_more_specific',
'filenames': [
'images/im_included.png', 'images/include/images/im_included.png'
],
'contents': '\\include{images/include/images/im_included.png}',
'strict': False,
'true_outputs': ['images/include/images/im_included.png']
}, {
'testcase_name':
'nested_less_specific',
'filenames': [
'images/im_included.png', 'images/include/images/im_included.png'
],
'contents':
'\\include{images/im_included.png}',
'strict':
False,
'true_outputs': [
'images/im_included.png', 'images/include/images/im_included.png'
]
}, {
'testcase_name': 'nested_substring',
'filenames': ['images/im_included.png', 'im_included.png'],
'contents': '\\include{images/im_included.png}',
'strict': False,
'true_outputs': ['images/im_included.png']
}, {
'testcase_name': 'nested_diffpath',
'filenames': ['images/im_included.png', 'figures/im_included.png'],
'contents': '\\include{images/im_included.png}',
'strict': False,
'true_outputs': ['images/im_included.png']
}, {
'testcase_name': 'diffext',
'filenames': ['tables/demo.tex', 'tables/demo.tikz', 'demo.tex'],
'contents': '\\include{tables/demo.tex}',
'strict': False,
'true_outputs': ['tables/demo.tex']
}, {
'testcase_name': 'diffext2',
'filenames': ['tables/demo.tex', 'tables/demo.tikz', 'demo.tex'],
'contents': '\\include{tables/demo}',
'strict': False,
'true_outputs': ['tables/demo.tex', 'tables/demo.tikz']
}, {
'testcase_name': 'strict_prefix1',
'filenames': ['demo_yes.tex', 'demo.tex'],
'contents': '\\include{demo_yes.tex}',
'strict': True,
'true_outputs': ['demo_yes.tex']
}, {
'testcase_name': 'strict_prefix2',
'filenames': ['demo_yes.tex', 'demo.tex'],
'contents': '\\include{demo.tex}',
'strict': True,
'true_outputs': ['demo.tex']
}, {
'testcase_name': 'strict_nested_more_specific',
'filenames': [
'tables/table_included.csv',
'tables/include/tables/table_included.csv'
],
'contents': '\\include{tables/include/tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/include/tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_less_specific',
'filenames': [
'tables/table_included.csv',
'tables/include/tables/table_included.csv'
],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_substring1',
'filenames': ['tables/table_included.csv', 'table_included.csv'],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_nested_substring2',
'filenames': ['tables/table_included.csv', 'table_included.csv'],
'contents': '\\include{table_included.csv}',
'strict': True,
'true_outputs': ['table_included.csv']
}, {
'testcase_name': 'strict_nested_diffpath',
'filenames': ['tables/table_included.csv', 'data/table_included.csv'],
'contents': '\\include{tables/table_included.csv}',
'strict': True,
'true_outputs': ['tables/table_included.csv']
}, {
'testcase_name': 'strict_diffext',
'filenames': ['tables/demo.csv', 'tables/demo.txt', 'demo.csv'],
'contents': '\\include{tables/demo.csv}',
'strict': True,
'true_outputs': ['tables/demo.csv']
}, {
'testcase_name': 'path_starting_with_dot',
'filenames': ['./images/im_included.png', './figures/im_included.png'],
'contents': '\\include{./images/im_included.png}',
'strict': False,
'true_outputs': ['./images/im_included.png']
})
class UnitTests(parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'empty config',
'args': make_args(),
'config_params': {},
'final_args': make_args(),
},
{
'testcase_name': 'empty args',
'args': {},
'config_params': make_args(),
'final_args': make_args(),
},
{
'testcase_name':
'args and config provided',
'args':
make_args(
images_allowlist={'path1/': 1000},
commands_to_delete=[r'\todo1']),
'config_params':
make_args(
'foo_/bar_',
True,
1000,
True,
1000,
images_allowlist={'path2/': 1000},
commands_to_delete=[r'\todo2'],
use_external_tikz='foo_/bar_/tikz_',
),
'final_args':
make_args(
images_allowlist={
'path1/': 1000,
'path2/': 1000
},
commands_to_delete=[r'\todo1', r'\todo2'],
),
},
)
def test_merge_args_into_config(self, args, config_params, final_args):
self.assertEqual(
arxiv_latex_cleaner.merge_args_into_config(args, config_params),
final_args)
@parameterized.named_parameters(
{
'testcase_name': 'no_comment',
'line_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'auto_ignore',
'line_in': '%auto-ignore\n',
'true_output': '%auto-ignore\n'
}, {
'testcase_name': 'percent',
'line_in': r'100\% accurate\n',
'true_output': r'100\% accurate\n'
}, {
'testcase_name': 'comment',
'line_in': ' % Comment\n',
'true_output': ''
}, {
'testcase_name': 'comment_inline',
'line_in': 'Foo %Comment\n',
'true_output': 'Foo %\n'
})
def test_remove_comments_inline(self, line_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_comments_inline(line_in), true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_command',
'text_in': 'Foo\nFoo2\n',
'keep_text': False,
'true_output': 'Foo\nFoo2\n'
}, {
'testcase_name': 'command_not_removed',
'text_in': '\\textit{Foo\nFoo2}\n',
'keep_text': False,
'true_output': '\\textit{Foo\nFoo2}\n'
}, {
'testcase_name': 'command_no_end_line_removed',
'text_in': 'A\\todo{B\nC}D\nE\n\\end{document}',
'keep_text': False,
'true_output': 'AD\nE\n\\end{document}'
}, {
'testcase_name': 'command_with_end_line_removed',
'text_in': 'A\n\\todo{B\nC}\nD\n\\end{document}',
'keep_text': False,
'true_output': 'A\n%\nD\n\\end{document}'
}, {
'testcase_name': 'no_command_keep_text',
'text_in': 'Foo\nFoo2\n',
'keep_text': True,
'true_output': 'Foo\nFoo2\n'
}, {
'testcase_name': 'command_not_removed_keep_text',
'text_in': '\\textit{Foo\nFoo2}\n',
'keep_text': True,
'true_output': '\\textit{Foo\nFoo2}\n'
}, {
'testcase_name': 'command_no_end_line_removed_keep_text',
'text_in': 'A\\todo{B\nC}D\nE\n\\end{document}',
'keep_text': True,
'true_output': 'AB\nCD\nE\n\\end{document}'
}, {
'testcase_name': 'command_with_end_line_removed_keep_text',
'text_in': 'A\n\\todo{B\nC}\nD\n\\end{document}',
'keep_text': True,
'true_output': 'A\nB\nC\nD\n\\end{document}'
}, {
'testcase_name': 'nested_command_keep_text',
'text_in': 'A\n\\todo{B\n\\todo{C}}\nD\n\\end{document}',
'keep_text': True,
'true_output': 'A\nB\nC\nD\n\\end{document}'
}, {
'testcase_name':
'deeply_nested_command_keep_text',
'text_in':
'A\n\\todo{B\n\\emph{C\\footnote{\\textbf{D}}}}\nE\n\\end{document}',
'keep_text':
True,
'true_output':
'A\nB\n\\emph{C\\footnote{\\textbf{D}}}\nE\n\\end{document}'
})
def test_remove_command(self, text_in, keep_text, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_command(text_in, 'todo', keep_text),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_environment',
'text_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'environment_not_removed',
'text_in': 'Foo\n\\begin{equation}\n3x+2\n\\end{equation}\nFoo',
'true_output': 'Foo\n\\begin{equation}\n3x+2\n\\end{equation}\nFoo'
}, {
'testcase_name': 'environment_removed',
'text_in': 'Foo\\begin{comment}\n3x+2\n\\end{comment}\nFoo',
'true_output': 'Foo\nFoo'
})
def test_remove_environment(self, text_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_environment(text_in, 'comment'),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_iffalse',
'text_in': 'Foo\n',
'true_output': 'Foo\n'
}, {
'testcase_name': 'if_not_removed',
'text_in': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n'
}, {
'testcase_name': 'if_removed_with_nested_ifvar',
'text_in': '\\ifvar\n\\iffalse\n\\ifvar\nFoo\n\\fi\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\fi\n'
}, {
'testcase_name': 'if_removed_with_nested_iffalse',
'text_in': '\\ifvar\n\\iffalse\n\\iffalse\nFoo\n\\fi\n\\fi\n\\fi\n',
'true_output': '\\ifvar\n\\fi\n'
}, {
'testcase_name': 'if_removed_eof',
'text_in': '\\iffalse\nFoo\n\\fi',
'true_output': ''
}, {
'testcase_name': 'if_removed_space',
'text_in': '\\iffalse\nFoo\n\\fi ',
'true_output': ''
}, {
'testcase_name': 'if_removed_backslash',
'text_in': '\\iffalse\nFoo\n\\fi\\end{document}',
'true_output': '\\end{document}'
}, {
'testcase_name': 'commands_not_removed',
        'text_in': '\\newcommand\\figref[1]{Figure~\\ref{fig:#1}}',
        'true_output': '\\newcommand\\figref[1]{Figure~\\ref{fig:#1}}'
})
def test_remove_iffalse_block(self, text_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._remove_iffalse_block(text_in), true_output)
@parameterized.named_parameters(
{
'testcase_name': 'all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a'],
'true_outputs': ['abc', 'bca'],
}, {
'testcase_name': 'not_all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a$'],
'true_outputs': ['bca'],
})
def test_keep_pattern(self, inputs, patterns, true_outputs):
self.assertEqual(
list(arxiv_latex_cleaner._keep_pattern(inputs, patterns)), true_outputs)
@parameterized.named_parameters(
{
'testcase_name': 'all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a'],
'true_outputs': [],
}, {
'testcase_name': 'not_all_pass',
'inputs': ['abc', 'bca'],
'patterns': ['a$'],
'true_outputs': ['abc'],
})
def test_remove_pattern(self, inputs, patterns, true_outputs):
self.assertEqual(
list(arxiv_latex_cleaner._remove_pattern(inputs, patterns)),
true_outputs)
@parameterized.named_parameters(
{
'testcase_name':
'replace_contents',
'content':
make_contents(),
'patterns_and_insertions':
make_patterns(),
'true_outputs': (
r'& \parbox[c]{\ww\linewidth}{\includegraphics[width=1.0\linewidth]{figures/image1.jpg}}'
'\n'
r'& \parbox[c]{\ww\linewidth}{\includegraphics[width=1.0\linewidth]{figures/image2.jpg}}'
),
},)
def test_find_and_replace_patterns(self, content, patterns_and_insertions,
true_outputs):
output = arxiv_latex_cleaner._find_and_replace_patterns(
content, patterns_and_insertions)
output = arxiv_latex_cleaner.strip_whitespace(output)
true_outputs = arxiv_latex_cleaner.strip_whitespace(true_outputs)
self.assertEqual(output, true_outputs)
@parameterized.named_parameters(
{
'testcase_name': 'no_tikz',
'text_in': 'Foo\n',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output': 'Foo\n'
}, {
'testcase_name':
'tikz_no_match',
'text_in':
'Foo\\tikzsetnextfilename{test_no_match}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output':
'Foo\\tikzsetnextfilename{test_no_match}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo'
}, {
'testcase_name':
'tikz_match',
'text_in':
'Foo\\tikzsetnextfilename{test2}\n\\begin{tikzpicture}\n\\node (test) at (0,0) {Test1};\n\\end{tikzpicture}\nFoo',
'figures_in': ['ext_tikz/test1.pdf', 'ext_tikz/test2.pdf'],
'true_output':
'Foo\\includegraphics{ext_tikz/test2.pdf}\nFoo'
})
def test_replace_tikzpictures(self, text_in, figures_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._replace_tikzpictures(text_in, figures_in),
true_output)
@parameterized.named_parameters(
{
'testcase_name': 'no_includesvg',
'text_in': 'Foo\n',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output': 'Foo\n'
}, {
'testcase_name':
'includesvg_no_match',
'text_in':
'Foo\\includesvg{test_no_match}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includesvg{test_no_match}\nFoo',
}, {
'testcase_name':
'includesvg_match',
'text_in':
'Foo\\includesvg{test2}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includeinkscape{ext_svg/test2-tex.pdf_tex}\nFoo'
}, {
'testcase_name':
'includesvg_match_with_options',
'text_in':
'Foo\\includesvg[width=\\linewidth]{test2}\nFoo',
'figures_in': ['ext_svg/test1-tex.pdf_tex',
'ext_svg/test2-tex.pdf_tex'],
'true_output':
'Foo\\includeinkscape[width=\\linewidth]{ext_svg/test2-tex.pdf_tex}\nFoo'
})
def test_replace_includesvg(self, text_in, figures_in, true_output):
self.assertEqual(
arxiv_latex_cleaner._replace_includesvg(text_in, figures_in),
true_output)
@parameterized.named_parameters(*make_search_reference_tests())
def test_search_reference_weak(self, filenames, contents, strict,
true_outputs):
cleaner_outputs = []
for filename in filenames:
reference = arxiv_latex_cleaner._search_reference(filename, contents,
strict)
if reference is not None:
cleaner_outputs.append(filename)
# weak check (passes as long as cleaner includes a superset of the true_output)
for true_output in true_outputs:
self.assertIn(true_output, cleaner_outputs)
@parameterized.named_parameters(*make_search_reference_tests())
def test_search_reference_strong(self, filenames, contents, strict,
true_outputs):
cleaner_outputs = []
for filename in filenames:
reference = arxiv_latex_cleaner._search_reference(filename, contents,
strict)
if reference is not None:
cleaner_outputs.append(filename)
# strong check (set of files must match exactly)
weak_check_result = set(true_outputs).issubset(cleaner_outputs)
if weak_check_result:
msg = 'not fatal, cleaner included more files than necessary'
else:
msg = 'fatal, see test_search_reference_weak'
self.assertEqual(cleaner_outputs, true_outputs, msg)
@parameterized.named_parameters(
{
'testcase_name': 'three_parent',
'filename': 'long/path/to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{path/to/img.ext}',
'{long/path/to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{ \npath/to/img.ext\n}',
'{ \n \nlong/path/to/img.ext\n}',
'{img}',
'{to/img}',
'{path/to/img}',
'{long/path/to/img}',
# dont match
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 12 + [False] * 13
},
{
'testcase_name': 'two_parent',
'filename': 'path/to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{path/to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{ \npath/to/img.ext\n}',
'{img}',
'{to/img}',
'{path/to/img}',
# dont match
'{long/path/to/img.ext}',
'{ \n \nlong/path/to/img.ext\n}',
'{long/path/to/img}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 9 + [False] * 16
},
{
'testcase_name': 'one_parent',
'filename': 'to/img.ext',
'content_strs': [
# match
'{img.ext}',
'{to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{img}',
'{to/img}',
# dont match
'{long/path/to/img}',
'{path/to/img}',
'{ \n \nlong/path/to/img.ext\n}',
'{ \npath/to/img.ext\n}',
'{long/path/to/img.ext}',
'{path/to/img.ext}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': False,
'true_outputs': [True] * 6 + [False] * 19
},
{
'testcase_name': 'two_parent_strict',
'filename': 'path/to/img.ext',
'content_strs': [
# match
'{path/to/img.ext}',
'{ \npath/to/img.ext\n}',
# dont match
'{img.ext}',
'{to/img.ext}',
'{%\nimg.ext }',
'{to/img.ext % \n}',
'{img}',
'{to/img}',
'{path/to/img}',
'{long/path/to/img.ext}',
'{ \n \nlong/path/to/img.ext\n}',
'{long/path/to/img}',
'{from/img.ext}',
'{from/img}',
'{imgoext}',
'{from/imgo}',
'{ \n long/\npath/to/img.ext\n}',
'{path/img.ext}',
'{long/img.ext}',
'{long/path/img.ext}',
'{long/to/img.ext}',
'{path/img}',
'{long/img}',
'{long/path/img}',
'{long/to/img}'
],
'strict': True,
'true_outputs': [True] * 2 + [False] * 23
},
)
def test_search_reference_filewise(self, filename, content_strs, strict,
true_outputs):
if len(content_strs) != len(true_outputs):
raise ValueError(
"number of true_outputs doesn't match number of content strs")
for content, true_output in zip(content_strs, true_outputs):
reference = arxiv_latex_cleaner._search_reference(filename, content,
strict)
matched = reference is not None
msg_not = ' ' if true_output else ' not '
msg_fmt = 'file {} should' + msg_not + 'have matched latex reference {}'
msg = msg_fmt.format(filename, content)
self.assertEqual(matched, true_output, msg)
class IntegrationTests(parameterized.TestCase):
def setUp(self):
super(IntegrationTests, self).setUp()
self.out_path = 'tex_arXiv'
def _compare_files(self, filename, filename_true):
if path.splitext(filename)[1].lower() in ['.jpg', '.jpeg', '.png']:
with Image.open(filename) as im, Image.open(filename_true) as im_true:
# We check only the sizes of the images, checking pixels would be too
# complicated in case the resize implementations change.
self.assertEqual(
im.size, im_true.size,
          'Image {:s} was not resized properly.'.format(filename))
else:
      # Checks that text files are equal without taking end-of-line characters
      # into account.
with open(filename, 'rb') as f:
processed_content = f.read().splitlines()
with open(filename_true, 'rb') as f:
groundtruth_content = f.read().splitlines()
self.assertEqual(
processed_content, groundtruth_content,
'{:s} and {:s} are not equal.'.format(filename, filename_true))
@parameterized.named_parameters(
      {'testcase_name': 'from_dir', 'input_dir': 'tex'},
      {'testcase_name': 'from_zip', 'input_dir': 'tex.zip'},
)
def test_complete(self, input_dir):
out_path_true = 'tex_arXiv_true'
# Make sure the folder does not exist, since we erase it in the test.
if path.isdir(self.out_path):
raise RuntimeError('The folder {:s} should not exist.'.format(
self.out_path))
arxiv_latex_cleaner.run_arxiv_cleaner({
'input_folder': input_dir,
'images_allowlist': {
'images/im2_included.jpg': 200,
'images/im3_included.png': 400,
},
'resize_images': True,
'im_size': 100,
'compress_pdf': False,
'pdf_im_resolution': 500,
'commands_to_delete': ['mytodo'],
'commands_only_to_delete': ['red'],
'environments_to_delete': ['mynote'],
'use_external_tikz': 'ext_tikz',
'keep_bib': False
})
# Checks the set of files is the same as in the true folder.
out_files = set(arxiv_latex_cleaner._list_all_files(self.out_path))
out_files_true = set(arxiv_latex_cleaner._list_all_files(out_path_true))
self.assertEqual(out_files, out_files_true)
# Compares the contents of each file against the true value.
for f1 in out_files:
self._compare_files(
path.join(self.out_path, f1), path.join(out_path_true, f1))
def tearDown(self):
shutil.rmtree(self.out_path)
super(IntegrationTests, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_datasets as tfds
import numpy as np
# A workaround to avoid a crash because tfds may open too many files.
import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
# Adjust depending on the available RAM.
MAX_IN_MEMORY = 200_000
DATASET_SPLITS = {
'cifar10': {'train': 'train[:98%]', 'test': 'test'},
'cifar100': {'train': 'train[:98%]', 'test': 'test'},
'imagenet2012': {'train': 'train[:99%]', 'test': 'validation'},
}
def get_dataset_info(dataset, split, examples_per_class):
data_builder = tfds.builder(dataset)
original_num_examples = data_builder.info.splits[split].num_examples
num_classes = data_builder.info.features['label'].num_classes
if examples_per_class is not None:
num_examples = examples_per_class * num_classes
else:
num_examples = original_num_examples
return {'original_num_examples': original_num_examples,
'num_examples': num_examples,
'num_classes': num_classes}
def sample_subset(data, num_examples, num_classes,
examples_per_class, examples_per_class_seed):
data = data.batch(min(num_examples, MAX_IN_MEMORY))
data = data.as_numpy_iterator().next()
np.random.seed(examples_per_class_seed)
indices = [idx
for c in range(num_classes)
for idx in np.random.choice(np.where(data['label'] == c)[0],
examples_per_class,
replace=False)]
data = {'image': data['image'][indices],
'label': data['label'][indices]}
data = tf.data.Dataset.zip(
(tf.data.Dataset.from_tensor_slices(data['image']),
tf.data.Dataset.from_tensor_slices(data['label'])))
return data.map(lambda x, y: {'image': x, 'label': y},
tf.data.experimental.AUTOTUNE)
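# Illustrative call of sample_subset above (a sketch; the values are
# assumptions): draw a balanced 5-shot subset from an in-memory CIFAR-10
# train split of 50,000 examples.
#   subset = sample_subset(data, num_examples=50_000, num_classes=10,
#                          examples_per_class=5, examples_per_class_seed=0)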
def get_data(dataset, mode,
repeats, batch_size,
resize_size, crop_size,
mixup_alpha,
examples_per_class, examples_per_class_seed,
num_devices,
tfds_manual_dir):
split = DATASET_SPLITS[dataset][mode]
dataset_info = get_dataset_info(dataset, split, examples_per_class)
data_builder = tfds.builder(dataset)
data_builder.download_and_prepare(
download_config=tfds.download.DownloadConfig(manual_dir=tfds_manual_dir))
data = data_builder.as_dataset(
split=split,
decoders={'image': tfds.decode.SkipDecoding()})
decoder = data_builder.info.features['image'].decode_example
if (mode == 'train') and (examples_per_class is not None):
data = sample_subset(data,
dataset_info['original_num_examples'],
dataset_info['num_classes'],
examples_per_class, examples_per_class_seed)
def _pp(data):
im = decoder(data['image'])
if mode == 'train':
im = tf.image.resize(im, [resize_size, resize_size])
im = tf.image.random_crop(im, [crop_size, crop_size, 3])
      im = tf.image.random_flip_left_right(im)  # random flip for augmentation
else:
# usage of crop_size here is intentional
im = tf.image.resize(im, [crop_size, crop_size])
im = (im - 127.5) / 127.5
label = tf.one_hot(data['label'], dataset_info['num_classes'])
return {'image': im, 'label': label}
data = data.cache()
data = data.repeat(repeats)
if mode == 'train':
data = data.shuffle(min(dataset_info['num_examples'], MAX_IN_MEMORY))
data = data.map(_pp, tf.data.experimental.AUTOTUNE)
data = data.batch(batch_size, drop_remainder=True)
def _mixup(data):
beta_dist = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
beta = tf.cast(beta_dist.sample([]), tf.float32)
data['image'] = (beta * data['image'] +
(1 - beta) * tf.reverse(data['image'], axis=[0]))
data['label'] = (beta * data['label'] +
(1 - beta) * tf.reverse(data['label'], axis=[0]))
return data
if mixup_alpha is not None and mixup_alpha > 0.0 and mode == 'train':
data = data.map(_mixup, tf.data.experimental.AUTOTUNE)
  # Shard data such that it can be distributed across devices.
def _shard(data):
data['image'] = tf.reshape(data['image'],
[num_devices, -1, crop_size, crop_size, 3])
data['label'] = tf.reshape(data['label'],
[num_devices, -1, dataset_info['num_classes']])
return data
if num_devices is not None:
data = data.map(_shard, tf.data.experimental.AUTOTUNE)
return data.prefetch(1)
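# Illustrative call of get_data above (a sketch; the parameter values are
# assumptions, with resize/crop sizes matching bit_hyperrule.get_resolution
# for CIFAR-10):
#   train_data = get_data(
#       dataset='cifar10', mode='train', repeats=None, batch_size=128,
#       resize_size=160, crop_size=128, mixup_alpha=None,
#       examples_per_class=None, examples_per_class_seed=0,
#       num_devices=None, tfds_manual_dir=None)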
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_resolution(original_resolution):
"""Takes (H,W) and returns (precrop, crop)."""
area = original_resolution[0] * original_resolution[1]
return (160, 128) if area < 96*96 else (512, 480)
known_dataset_sizes = {
'cifar10': (32, 32),
'cifar100': (32, 32),
'oxford_iiit_pet': (224, 224),
'oxford_flowers102': (224, 224),
'imagenet2012': (224, 224),
}
def get_resolution_from_dataset(dataset):
if dataset not in known_dataset_sizes:
raise ValueError(f"Unsupported dataset {dataset}. Add your own here :)")
return get_resolution(known_dataset_sizes[dataset])
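# Illustrative values from the BiT-HyperRule above:
#   get_resolution_from_dataset('cifar10')      -> (160, 128)  # 32*32 < 96*96
#   get_resolution_from_dataset('imagenet2012') -> (512, 480)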
def get_mixup(dataset_size):
return 0.0 if dataset_size < 20_000 else 0.1
def get_schedule(dataset_size):
if dataset_size < 20_000:
return [100, 200, 300, 400, 500]
elif dataset_size < 500_000:
return [500, 3000, 6000, 9000, 10_000]
else:
return [500, 6000, 12_000, 18_000, 20_000]
def get_lr(step, dataset_size, base_lr=0.003):
"""Returns learning-rate for `step` or None at the end."""
supports = get_schedule(dataset_size)
# Linear warmup
if step < supports[0]:
return base_lr * step / supports[0]
# End of training
elif step >= supports[-1]:
return None
# Staircase decays by factor of 10
else:
for s in supports[1:]:
if s < step:
base_lr /= 10
return base_lr
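# Illustrative sanity check of the hyper-rule (a sketch, not part of the
# original training flow; 50_000 stands in for a mid-sized dataset):
if __name__ == "__main__":
  # Expect linear warmup until step 500, then staircase decays by 10x.
  for step in (0, 250, 500, 3_000, 9_999):
    print(step, get_lr(step, dataset_size=50_000))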
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding: utf-8
import argparse
import logging
import logging.config
import os
import bit_hyperrule
def argparser(known_models):
parser = argparse.ArgumentParser(description="Fine-tune BiT-M model.")
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring and checkpointing.")
parser.add_argument("--model", choices=list(known_models),
help="Which variant to use; BiT-M gives best results.")
parser.add_argument("--logdir", required=True,
help="Where to log training info (small).")
parser.add_argument("--bit_pretrained_dir", default='.',
help="Where to search for pretrained BiT models.")
parser.add_argument("--dataset", choices=list(bit_hyperrule.known_dataset_sizes.keys()),
help="Choose the dataset. It should be easy to add your own! "
"Don't forget to set --datadir if necessary.")
parser.add_argument("--examples_per_class", type=int, default=None,
help="For the few-shot variant, use this many examples "
"per class only.")
parser.add_argument("--examples_per_class_seed", type=int, default=0,
help="Random seed for selecting examples.")
parser.add_argument("--batch", type=int, default=512,
help="Batch size.")
parser.add_argument("--batch_split", type=int, default=1,
help="Number of batches to compute gradient on before updating weights.")
parser.add_argument("--base_lr", type=float, default=0.003,
help="Base learning-rate for fine-tuning. Most likely default is best.")
parser.add_argument("--eval_every", type=int, default=None,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
return parser
def setup_logger(args):
"""Creates and returns a fancy logger."""
# return logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(message)s")
# Why is setting up proper logging so !@?#! ugly?
os.makedirs(os.path.join(args.logdir, args.name), exist_ok=True)
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
},
},
"handlers": {
"stderr": {
"level": "INFO",
"formatter": "standard",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
"logfile": {
"level": "DEBUG",
"formatter": "standard",
"class": "logging.FileHandler",
"filename": os.path.join(args.logdir, args.name, "train.log"),
"mode": "a",
}
},
"loggers": {
"": {
"handlers": ["stderr", "logfile"],
"level": "DEBUG",
"propagate": True
},
}
})
logger = logging.getLogger(__name__)
logger.flush = lambda: [h.flush() for h in logger.handlers]
logger.info(args)
return logger
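# Minimal wiring of the two helpers above (a sketch; the one-element model
# set and the /tmp paths are placeholders, not the real model registry):
if __name__ == "__main__":
  args = argparser({"BiT-M-R50x1"}).parse_args(
      ["--name", "demo", "--logdir", "/tmp/bit_logs"])
  logger = setup_logger(args)
  logger.info("Writes to stderr and to /tmp/bit_logs/demo/train.log")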
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.numpy as jnp
import flax.nn as nn
def fixed_padding(x, kernel_size):
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = jax.lax.pad(x, 0.0,
((0, 0, 0),
(pad_beg, pad_end, 0), (pad_beg, pad_end, 0),
(0, 0, 0)))
return x
def standardize(x, axis, eps):
x = x - jnp.mean(x, axis=axis, keepdims=True)
x = x / jnp.sqrt(jnp.mean(jnp.square(x), axis=axis, keepdims=True) + eps)
return x
class GroupNorm(nn.Module):
"""Group normalization (arxiv.org/abs/1803.08494)."""
def apply(self, x, num_groups=32):
input_shape = x.shape
group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
x = x.reshape(group_shape)
# Standardize along spatial and group dimensions
x = standardize(x, axis=[1, 2, 4], eps=1e-5)
x = x.reshape(input_shape)
bias_scale_shape = tuple([1, 1, 1] + [input_shape[-1]])
x = x * self.param('scale', bias_scale_shape, nn.initializers.ones)
x = x + self.param('bias', bias_scale_shape, nn.initializers.zeros)
return x
class StdConv(nn.Conv):
def param(self, name, shape, initializer):
param = super().param(name, shape, initializer)
if name == 'kernel':
param = standardize(param, axis=[0, 1, 2], eps=1e-10)
return param
class RootBlock(nn.Module):
def apply(self, x, width):
x = fixed_padding(x, 7)
x = StdConv(x, width, (7, 7), (2, 2),
padding="VALID",
bias=False,
name="conv_root")
x = fixed_padding(x, 3)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding="VALID")
return x
class ResidualUnit(nn.Module):
"""Bottleneck ResNet block."""
def apply(self, x, nout, strides=(1, 1)):
x_shortcut = x
needs_projection = x.shape[-1] != nout * 4 or strides != (1, 1)
group_norm = GroupNorm
conv = StdConv.partial(bias=False)
x = group_norm(x, name="gn1")
x = nn.relu(x)
if needs_projection:
x_shortcut = conv(x, nout * 4, (1, 1), strides, name="conv_proj")
x = conv(x, nout, (1, 1), name="conv1")
x = group_norm(x, name="gn2")
x = nn.relu(x)
x = fixed_padding(x, 3)
x = conv(x, nout, (3, 3), strides, name="conv2", padding='VALID')
x = group_norm(x, name="gn3")
x = nn.relu(x)
x = conv(x, nout * 4, (1, 1), name="conv3")
return x + x_shortcut
class ResidualBlock(nn.Module):
def apply(self, x, block_size, nout, first_stride):
x = ResidualUnit(
x, nout, strides=first_stride,
name="unit01")
for i in range(1, block_size):
x = ResidualUnit(
x, nout, strides=(1, 1),
name=f"unit{i+1:02d}")
return x
class ResNet(nn.Module):
"""ResNetV2."""
def apply(self, x, num_classes=1000,
width_factor=1, num_layers=50):
block_sizes = _block_sizes[num_layers]
width = 64 * width_factor
root_block = RootBlock.partial(width=width)
x = root_block(x, name='root_block')
# Blocks
for i, block_size in enumerate(block_sizes):
x = ResidualBlock(x, block_size, width * 2 ** i,
first_stride=(1, 1) if i == 0 else (2, 2),
name=f"block{i + 1}")
# Pre-head
x = GroupNorm(x, name='norm-pre-head')
x = nn.relu(x)
x = jnp.mean(x, axis=(1, 2))
# Head
x = nn.Dense(x, num_classes, name="conv_head",
kernel_init=nn.initializers.zeros)
return x.astype(jnp.float32)
_block_sizes = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}
KNOWN_MODELS = dict(
[(bit + f'-R{l}x{w}', ResNet.partial(num_layers=l, width_factor=w))
for bit in ['BiT-S', 'BiT-M']
for l, w in [(50, 1), (50, 3), (101, 1), (152, 2), (101, 3), (152, 4)]]
)
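# Shape-only smoke test (a sketch with random weights, mirroring how the
# training script later in this repo instantiates models via the old
# flax.nn API: partial -> init_by_shape -> call):
if __name__ == "__main__":
  model = KNOWN_MODELS['BiT-S-R50x1'].partial(num_classes=10)
  _, params = model.init_by_shape(
      jax.random.PRNGKey(0), [([1, 128, 128, 3], jnp.float32)])
  print(model.call(params, jnp.zeros([1, 128, 128, 3])).shape)  # (1, 10)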
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
import flax.optim as optim
import flax.jax_utils as flax_utils
import input_pipeline_tf2_or_jax as input_pipeline
import bit_jax.models as models
import bit_jax.tf2jax as tf2jax
import bit_common
import bit_hyperrule
def main(args):
logger = bit_common.setup_logger(args)
logger.info(f'Available devices: {jax.devices()}')
model = models.KNOWN_MODELS[args.model]
# Load weights of a BiT model
bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.npz')
if not os.path.exists(bit_model_file):
raise FileNotFoundError(
f'Model file not found in "{args.bit_pretrained_dir}" directory.')
with open(bit_model_file, 'rb') as f:
params_tf = np.load(f)
params_tf = dict(zip(params_tf.keys(), params_tf.values()))
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(
args.dataset)
# Setup input pipeline
dataset_info = input_pipeline.get_dataset_info(
args.dataset, 'train', args.examples_per_class)
data_train = input_pipeline.get_data(
dataset=args.dataset,
mode='train',
repeats=None, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=args.examples_per_class,
examples_per_class_seed=args.examples_per_class_seed,
mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
num_devices=jax.local_device_count(),
tfds_manual_dir=args.tfds_manual_dir)
logger.info(data_train)
data_test = input_pipeline.get_data(
dataset=args.dataset,
mode='test',
repeats=1, batch_size=args.batch_eval,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=None, examples_per_class_seed=0,
mixup_alpha=None,
num_devices=jax.local_device_count(),
tfds_manual_dir=args.tfds_manual_dir)
logger.info(data_test)
# Build ResNet architecture
ResNet = model.partial(num_classes=dataset_info['num_classes'])
_, params = ResNet.init_by_shape(
jax.random.PRNGKey(0),
[([1, crop_size, crop_size, 3], jnp.float32)])
resnet_fn = ResNet.call
# pmap replicates the model over all local devices
resnet_fn_repl = jax.pmap(ResNet.call)
def cross_entropy_loss(*, logits, labels):
logp = jax.nn.log_softmax(logits)
return -jnp.mean(jnp.sum(logp * labels, axis=1))
def loss_fn(params, images, labels):
logits = resnet_fn(params, images)
return cross_entropy_loss(logits=logits, labels=labels)
# Update step, replicated over all GPUs
@partial(jax.pmap, axis_name='batch')
def update_fn(opt, lr, batch):
l, g = jax.value_and_grad(loss_fn)(opt.target,
batch['image'],
batch['label'])
g = jax.tree_map(lambda x: jax.lax.pmean(x, axis_name='batch'), g)
opt = opt.apply_gradient(g, learning_rate=lr)
return opt
# In-place update of randomly initialized weights with BiT weights
tf2jax.transform_params(params, params_tf,
num_classes=dataset_info['num_classes'])
# Create optimizer and replicate it over all GPUs
opt = optim.Momentum(beta=0.9).create(params)
opt_repl = flax_utils.replicate(opt)
# Delete references to the objects that are not needed anymore
del opt
del params
total_steps = bit_hyperrule.get_schedule(dataset_info['num_examples'])[-1]
# Run training loop
for step, batch in zip(range(1, total_steps + 1),
data_train.as_numpy_iterator()):
lr = bit_hyperrule.get_lr(step - 1,
dataset_info['num_examples'],
args.base_lr)
opt_repl = update_fn(opt_repl, flax_utils.replicate(lr), batch)
# Run eval step
if ((args.eval_every and step % args.eval_every == 0)
or (step == total_steps)):
accuracy_test = np.mean([
c
for batch in data_test.as_numpy_iterator()
for c in (
np.argmax(resnet_fn_repl(opt_repl.target, batch['image']), axis=2) ==
np.argmax(batch['label'], axis=2)).ravel()])
logger.info(
f'Step: {step}, '
f'learning rate: {lr:.07f}, '
f'Test accuracy: {accuracy_test:0.3f}')
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--tfds_manual_dir", default=None,
help="Path to maually downloaded dataset.")
parser.add_argument("--batch_eval", default=32, type=int,
help="Eval batch size.")
main(parser.parse_args())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import re
def transform_params(params, params_tf, num_classes):
# BiT and JAX models have different naming conventions, so we need to
# properly map TF weights to JAX weights
params['root_block']['conv_root']['kernel'] = (
params_tf['resnet/root_block/standardized_conv2d/kernel'])
for block in ['block1', 'block2', 'block3', 'block4']:
units = set([re.findall(r'unit\d+', p)[0] for p in params_tf.keys()
if p.find(block) >= 0])
for unit in units:
for i, group in enumerate(['a', 'b', 'c']):
params[block][unit][f'conv{i+1}']['kernel'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'standardized_conv2d/kernel'])
params[block][unit][f'gn{i+1}']['bias'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'group_norm/beta'][None, None, None])
params[block][unit][f'gn{i+1}']['scale'] = (
params_tf[f'resnet/{block}/{unit}/{group}/'
'group_norm/gamma'][None, None, None])
projs = [p for p in params_tf.keys()
if p.find(f'{block}/{unit}/a/proj') >= 0]
assert len(projs) <= 1
if projs:
params[block][unit]['conv_proj']['kernel'] = params_tf[projs[0]]
params['norm-pre-head']['bias'] = (
params_tf['resnet/group_norm/beta'][None, None, None])
params['norm-pre-head']['scale'] = (
params_tf['resnet/group_norm/gamma'][None, None, None])
params['conv_head']['kernel'] = np.zeros(
(params['conv_head']['kernel'].shape[0], num_classes), dtype=np.float32)
params['conv_head']['bias'] = np.zeros(num_classes, dtype=np.float32)
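# Typical use (a sketch mirroring the JAX training script above): load the
# TF .npz checkpoint and overwrite the freshly initialized params in place.
#
#   with open('BiT-M-R50x1.npz', 'rb') as f:
#     params_tf = dict(np.load(f))
#   transform_params(params, params_tf, num_classes=10)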
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Bottleneck ResNet v2 with GroupNorm and Weight Standardization."""
from collections import OrderedDict # pylint: disable=g-importing-member
import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride,
padding=1, bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride,
padding=0, bias=bias)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
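# e.g. a TF kernel of shape (3, 3, 64, 128) (HWIO) becomes a torch tensor of
# shape (128, 64, 3, 3) (OIHW); non-4D arrays such as GroupNorm beta/gamma
# pass through unchanged.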
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout//4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride) # Original code has it on conv1!!
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if (stride != 1 or cin != cout):
# Projection also with pre-activation according to paper.
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
# Residual branch
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
# Unit's branch
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
super().__init__()
wf = width_factor # shortcut 'cause we'll use it a lot.
# The following will be unreadable if we split lines.
# pylint: disable=line-too-long
self.root = nn.Sequential(OrderedDict([
('conv', StdConv2d(3, 64*wf, kernel_size=7, stride=2, padding=3, bias=False)),
('pad', nn.ConstantPad2d(1, 0)),
('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
# The following is subtly not the same!
# ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
self.body = nn.Sequential(OrderedDict([
('block1', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=64*wf, cout=256*wf, cmid=64*wf))] +
[(f'unit{i:02d}', PreActBottleneck(cin=256*wf, cout=256*wf, cmid=64*wf)) for i in range(2, block_units[0] + 1)],
))),
('block2', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=256*wf, cout=512*wf, cmid=128*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=512*wf, cout=512*wf, cmid=128*wf)) for i in range(2, block_units[1] + 1)],
))),
('block3', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=512*wf, cout=1024*wf, cmid=256*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=1024*wf, cout=1024*wf, cmid=256*wf)) for i in range(2, block_units[2] + 1)],
))),
('block4', nn.Sequential(OrderedDict(
[('unit01', PreActBottleneck(cin=1024*wf, cout=2048*wf, cmid=512*wf, stride=2))] +
[(f'unit{i:02d}', PreActBottleneck(cin=2048*wf, cout=2048*wf, cmid=512*wf)) for i in range(2, block_units[3] + 1)],
))),
]))
# pylint: enable=line-too-long
self.zero_head = zero_head
self.head = nn.Sequential(OrderedDict([
('gn', nn.GroupNorm(32, 2048*wf)),
('relu', nn.ReLU(inplace=True)),
('avg', nn.AdaptiveAvgPool2d(output_size=1)),
('conv', nn.Conv2d(2048*wf, head_size, kernel_size=1, bias=True)),
]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1) # We should have no spatial shape left.
return x[...,0,0]
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) # pylint: disable=line-too-long
self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel'])) # pylint: disable=line-too-long
self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
KNOWN_MODELS = OrderedDict([
('BiT-M-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-M-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-M-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-M-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-M-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-M-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
('BiT-S-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-S-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-S-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-S-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-S-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-S-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
])
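# Shape-only smoke test (a sketch; weights here are random, not BiT):
if __name__ == "__main__":
  model = KNOWN_MODELS['BiT-M-R50x1'](head_size=10, zero_head=True)
  with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
  print(out.shape)  # torch.Size([1, 10])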
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Various utilities from my toolbox at github.com/lucasb-eyer/lbtoolbox."""
import collections
import json
import signal
import time
import numpy as np
class Uninterrupt:
"""Context manager to gracefully handle interrupts.
Use as:
with Uninterrupt() as u:
while not u.interrupted:
# train
"""
def __init__(self, sigs=(signal.SIGINT, signal.SIGTERM), verbose=False):
self.sigs = sigs
self.verbose = verbose
self.interrupted = False
self.orig_handlers = None
def __enter__(self):
if self.orig_handlers is not None:
raise ValueError("Can only enter `Uninterrupt` once!")
self.interrupted = False
self.orig_handlers = [signal.getsignal(sig) for sig in self.sigs]
def handler(signum, frame):
del signum # unused
del frame # unused
self.release()
self.interrupted = True
if self.verbose:
print("Interruption scheduled...", flush=True)
for sig in self.sigs:
signal.signal(sig, handler)
return self
def __exit__(self, type_, value, tb):
self.release()
def release(self):
if self.orig_handlers is not None:
for sig, orig in zip(self.sigs, self.orig_handlers):
signal.signal(sig, orig)
self.orig_handlers = None
class Timer:
"""Context timing its scope."""
def __init__(self, donecb):
self.cb = donecb
def __enter__(self):
self.t0 = time.time()
def __exit__(self, exc_type, exc_value, traceback):
t = time.time() - self.t0
self.cb(t)
class Chrono:
"""Chronometer for poor-man's (but convenient!) profiling."""
def __init__(self):
self.timings = collections.OrderedDict()
def measure(self, what):
return Timer(lambda t: self._done(what, t))
def _done(self, what, t):
self.timings.setdefault(what, []).append(t)
def times(self, what):
return self.timings[what]
def avgtime(self, what, dropfirst=False):
timings = self.timings[what]
if dropfirst and len(timings) > 1:
timings = timings[1:]
return sum(timings)/len(timings)
def __str__(self, fmt="{:{w}.5f}", dropfirst=False):
avgtimes = {k: self.avgtime(k, dropfirst) for k in self.timings}
l = max(map(len, avgtimes))
w = max(len(fmt.format(v, w=0)) for v in avgtimes.values())
avg_by_time = sorted(avgtimes.items(), key=lambda t: t[1], reverse=True)
return "\n".join(f"{name:{l}s}: " + fmt.format(t, w=w) + "s"
for name, t in avg_by_time)
def create_dat(basename, dtype, shape, fillvalue=None, **meta):
"""Creates mem-mapped numpy array plus metadata.
Creates a data file at `basename` and returns a writeable mem-map backed
numpy array to it. Can also be passed any json-serializable keys and values
in `meta`.
"""
xm = np.memmap(basename, mode="w+", dtype=dtype, shape=shape)
xa = np.ndarray.__new__(np.ndarray, dtype=dtype, shape=shape, buffer=xm)
# xa.flush = xm.flush # Sadly, we can't just add attributes to a numpy array, need to subclass it.
if fillvalue is not None:
xa.fill(fillvalue)
# xa.flush()
xm.flush()
meta.setdefault("dtype", np.dtype(dtype).str)
meta.setdefault("shape", shape)
json.dump(meta, open(basename + ".json", "w+"))
return xa
def load_dat(basename, mode="r"):
"""Loads file created via `create_dat` as mem-mapped numpy array.
Returns a read-only mem-mapped numpy array to file at `basename`.
If `mode` is set to `'r+'`, the data can be written, too.
"""
desc = json.load(open(basename + ".json", "r"))
dtype, shape = desc["dtype"], desc["shape"]
xm = np.memmap(basename, mode=mode, dtype=dtype, shape=shape)
xa = np.ndarray.__new__(np.ndarray, dtype=dtype, shape=shape, buffer=xm)
# xa.flush = xm.flush # Sadly, we can't just add attributes to a numpy array, need to subclass it.
return xa
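# Round-trip example for the mem-map helpers above (a sketch writing a
# placeholder file to /tmp):
if __name__ == "__main__":
  x = create_dat("/tmp/demo.dat", np.float32, (4, 2), fillvalue=0.5)
  x[0, 0] = 1.0
  y = load_dat("/tmp/demo.dat")
  print(y.shape, y[0, 0])  # (4, 2) 1.0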
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fine-tune a BiT model on some downstream dataset."""
#!/usr/bin/env python3
# coding: utf-8
from os.path import join as pjoin # pylint: disable=g-importing-member
import time
import numpy as np
import torch
import torchvision as tv
import bit_pytorch.fewshot as fs
import bit_pytorch.lbtoolbox as lb
import bit_pytorch.models as models
import bit_common
import bit_hyperrule
def topk(output, target, ks=(1,)):
"""Returns one boolean vector for each k, whether the target is within the output's top-k."""
_, pred = output.topk(max(ks), 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [correct[:k].max(0)[0] for k in ks]
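# e.g. logits [[.1, .9], [.8, .2]] with targets [1, 0] give top-1
# [True, True]; averaging such vectors over a dataset yields top-k accuracy.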
def recycle(iterable):
"""Variant of itertools.cycle that does not save iterates."""
while True:
for i in iterable:
yield i
def mktrainval(args, logger):
"""Returns train and validation datasets."""
precrop, crop = bit_hyperrule.get_resolution_from_dataset(args.dataset)
train_tx = tv.transforms.Compose([
tv.transforms.Resize((precrop, precrop)),
tv.transforms.RandomCrop((crop, crop)),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
val_tx = tv.transforms.Compose([
tv.transforms.Resize((crop, crop)),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
if args.dataset == "cifar10":
train_set = tv.datasets.CIFAR10(args.datadir, transform=train_tx, train=True, download=True)
valid_set = tv.datasets.CIFAR10(args.datadir, transform=val_tx, train=False, download=True)
elif args.dataset == "cifar100":
train_set = tv.datasets.CIFAR100(args.datadir, transform=train_tx, train=True, download=True)
valid_set = tv.datasets.CIFAR100(args.datadir, transform=val_tx, train=False, download=True)
elif args.dataset == "imagenet2012":
train_set = tv.datasets.ImageFolder(pjoin(args.datadir, "train"), train_tx)
valid_set = tv.datasets.ImageFolder(pjoin(args.datadir, "val"), val_tx)
else:
raise ValueError(f"Sorry, we have not spent time implementing the "
f"{args.dataset} dataset in the PyTorch codebase. "
f"In principle, it should be easy to add :)")
if args.examples_per_class is not None:
logger.info(f"Looking for {args.examples_per_class} images per class...")
indices = fs.find_fewshot_indices(train_set, args.examples_per_class)
train_set = torch.utils.data.Subset(train_set, indices=indices)
logger.info(f"Using a training set with {len(train_set)} images.")
logger.info(f"Using a validation set with {len(valid_set)} images.")
micro_batch_size = args.batch // args.batch_split
valid_loader = torch.utils.data.DataLoader(
valid_set, batch_size=micro_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True, drop_last=False)
if micro_batch_size <= len(train_set):
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=micro_batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=False)
else:
# In the few-shot cases, the total dataset size might be smaller than the batch-size.
# In these cases, the default sampler doesn't repeat, so we need to make it do that
# if we want to match the behaviour from the paper.
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=micro_batch_size, num_workers=args.workers, pin_memory=True,
sampler=torch.utils.data.RandomSampler(train_set, replacement=True, num_samples=micro_batch_size))
return train_set, valid_set, train_loader, valid_loader
def run_eval(model, data_loader, device, chrono, logger, step):
# switch to evaluate mode
model.eval()
logger.info("Running validation...")
logger.flush()
all_c, all_top1, all_top5 = [], [], []
end = time.time()
for b, (x, y) in enumerate(data_loader):
with torch.no_grad():
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# measure data loading time
chrono._done("eval load", time.time() - end)
# compute output, measure accuracy and record loss.
with chrono.measure("eval fprop"):
logits = model(x)
c = torch.nn.CrossEntropyLoss(reduction='none')(logits, y)
top1, top5 = topk(logits, y, ks=(1, 5))
all_c.extend(c.cpu()) # Also ensures a sync point.
all_top1.extend(top1.cpu())
all_top5.extend(top5.cpu())
# measure elapsed time
end = time.time()
model.train()
logger.info(f"Validation@{step} loss {np.mean(all_c):.5f}, "
f"top1 {np.mean(all_top1):.2%}, "
f"top5 {np.mean(all_top5):.2%}")
logger.flush()
return all_c, all_top1, all_top5
def mixup_data(x, y, l):
"""Returns mixed inputs, pairs of targets, and lambda"""
indices = torch.randperm(x.shape[0]).to(x.device)
mixed_x = l * x + (1 - l) * x[indices]
y_a, y_b = y, y[indices]
return mixed_x, y_a, y_b
def mixup_criterion(criterion, pred, y_a, y_b, l):
return l * criterion(pred, y_a) + (1 - l) * criterion(pred, y_b)
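# Together these implement input mixup (arxiv.org/abs/1710.09412): a convex
# combination of two inputs is trained against the same convex combination
# of the two corresponding losses, with l ~ Beta(alpha, alpha) drawn once
# per batch (see `mixup_l` in the training loop below).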
def main(args):
logger = bit_common.setup_logger(args)
# Lets cuDNN benchmark conv implementations and choose the fastest.
# Only good if sizes stay the same within the main loop!
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
logger.info(f"Going to train on {device}")
train_set, valid_set, train_loader, valid_loader = mktrainval(args, logger)
logger.info(f"Loading model from {args.model}.npz")
model = models.KNOWN_MODELS[args.model](head_size=len(valid_set.classes), zero_head=True)
model.load_from(np.load(f"{args.model}.npz"))
logger.info("Moving model onto all GPUs")
model = torch.nn.DataParallel(model)
# Optionally resume from a checkpoint.
# Load it to CPU first as we'll move the model to GPU later.
# This way, we save a little bit of GPU memory when loading.
step = 0
# Note: no weight-decay!
optim = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
# Resume fine-tuning if we find a saved model.
savename = pjoin(args.logdir, args.name, "bit.pth.tar")
try:
logger.info(f"Model will be saved in '{savename}'")
checkpoint = torch.load(savename, map_location="cpu")
logger.info(f"Found saved model to resume from at '{savename}'")
step = checkpoint["step"]
model.load_state_dict(checkpoint["model"])
optim.load_state_dict(checkpoint["optim"])
logger.info(f"Resumed at step {step}")
except FileNotFoundError:
logger.info("Fine-tuning from BiT")
model = model.to(device)
optim.zero_grad()
model.train()
mixup = bit_hyperrule.get_mixup(len(train_set))
cri = torch.nn.CrossEntropyLoss().to(device)
logger.info("Starting training!")
chrono = lb.Chrono()
accum_steps = 0
mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
end = time.time()
with lb.Uninterrupt() as u:
for x, y in recycle(train_loader):
# measure data loading time, which is spent in the `for` statement.
chrono._done("load", time.time() - end)
if u.interrupted:
break
# Schedule sending to GPU(s)
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# Update learning-rate, including stop training if over.
lr = bit_hyperrule.get_lr(step, len(train_set), args.base_lr)
if lr is None:
break
for param_group in optim.param_groups:
param_group["lr"] = lr
if mixup > 0.0:
x, y_a, y_b = mixup_data(x, y, mixup_l)
# compute output
with chrono.measure("fprop"):
logits = model(x)
if mixup > 0.0:
c = mixup_criterion(cri, logits, y_a, y_b, mixup_l)
else:
c = cri(logits, y)
c_num = float(c.data.cpu().numpy()) # Also ensures a sync point.
# Accumulate grads
with chrono.measure("grads"):
(c / args.batch_split).backward()
accum_steps += 1
accstep = f" ({accum_steps}/{args.batch_split})" if args.batch_split > 1 else ""
logger.info(f"[step {step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})") # pylint: disable=logging-format-interpolation
logger.flush()
# Update params
if accum_steps == args.batch_split:
with chrono.measure("update"):
optim.step()
optim.zero_grad()
step += 1
accum_steps = 0
# Sample new mixup ratio for next batch
mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
# Run evaluation and save the model.
if args.eval_every and step % args.eval_every == 0:
run_eval(model, valid_loader, device, chrono, logger, step)
if args.save:
torch.save({
"step": step,
"model": model.state_dict(),
"optim" : optim.state_dict(),
}, savename)
end = time.time()
# Final eval at end of training.
run_eval(model, valid_loader, device, chrono, logger, step='end')
logger.info(f"Timings:\n{chrono}")
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--datadir", required=True,
help="Path to the ImageNet data folder, preprocessed for torchvision.")
parser.add_argument("--workers", type=int, default=8,
help="Number of background threads used to load data.")
parser.add_argument("--no-save", dest="save", action="store_false")
main(parser.parse_args())
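# Example invocation (a sketch; paths are placeholders):
#   python3 -m bit_pytorch.train --name cifar10_run --model BiT-M-R50x1 \
#     --logdir /tmp/bit_logs --datadir /tmp/data --dataset cifar10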
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility to find k-shot dataset indices, outputs the indices on stdout."""
#!/usr/bin/env python3
# coding: utf-8
from collections import *
from functools import *
import random
import sys
import torch
import torchvision as tv
class AddIndexIter(torch.utils.data.dataloader._SingleProcessDataLoaderIter):
def _next_data(self):
index = self._next_index() # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = torch.utils.data._utils.pin_memory.pin_memory(data)
return index, data
def find_indices_loader(loader, n_shots, n_classes):
per_label_indices = defaultdict(partial(deque, maxlen=n_shots))
for ibatch, (indices, (images, labels)) in enumerate(AddIndexIter(loader)):
for idx, lbl in zip(indices, labels):
per_label_indices[lbl.item()].append(idx)
findings = sum(map(len, per_label_indices.values()))
if findings == n_shots * n_classes:
return per_label_indices
raise RuntimeError("Unable to find enough examples!")
def find_fewshot_indices(dataset, n_shots):
n_classes = len(dataset.classes)
orig_transform = dataset.transform
dataset.transform = tv.transforms.Compose([
tv.transforms.CenterCrop(1),
tv.transforms.ToTensor()
])
# TODO(lbeyer): if dataset isinstance DatasetFolder, we can (maybe?) do much better!
loader = torch.utils.data.DataLoader(dataset, batch_size=1024, shuffle=True, num_workers=0)
per_label_indices = find_indices_loader(loader, n_shots, n_classes)
all_indices = [i for indices in per_label_indices.values() for i in indices]
random.shuffle(all_indices)
dataset.transform = orig_transform
return all_indices
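# Example (a sketch): for a 10-class dataset, find_fewshot_indices(ds, 5)
# returns 50 shuffled indices, 5 per class, ready for torch.utils.data.Subset.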
if __name__ == "__main__":
# `preprocess` was undefined in the original; any cheap transform works,
# since find_fewshot_indices swaps in its own transform while scanning.
preprocess = tv.transforms.ToTensor()
dataset = tv.datasets.ImageFolder(sys.argv[2], preprocess)
all_indices = find_fewshot_indices(dataset, int(sys.argv[1]))
for i in all_indices:
print(i)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet architecture as used in BiT."""
import tensorflow.compat.v2 as tf
from . import normalization
def add_name_prefix(name, prefix=None):
return prefix + "/" + name if prefix else name
class ReLU(tf.keras.layers.ReLU):
def compute_output_shape(self, input_shape):
return tf.TensorShape(input_shape)
class PaddingFromKernelSize(tf.keras.layers.Layer):
"""Layer that adds padding to an image taking into a given kernel size."""
def __init__(self, kernel_size, **kwargs):
super(PaddingFromKernelSize, self).__init__(**kwargs)
pad_total = kernel_size - 1
self._pad_beg = pad_total // 2
self._pad_end = pad_total - self._pad_beg
def compute_output_shape(self, input_shape):
batch_size, height, width, channels = tf.TensorShape(input_shape).as_list()
if height is not None:
height = height + self._pad_beg + self._pad_end
if width is not None:
width = width + self._pad_beg + self._pad_end
return tf.TensorShape((batch_size, height, width, channels))
def call(self, x):
padding = [
[0, 0],
[self._pad_beg, self._pad_end],
[self._pad_beg, self._pad_end],
[0, 0]]
return tf.pad(x, padding)
class StandardizedConv2D(tf.keras.layers.Conv2D):
"""Implements the abs/1903.10520 technique (see go/dune-gn).
You can simply replace any Conv2D with this one to use re-parametrized
convolution operation in which the kernels are standardized before conv.
Note that it does not come with extra learnable scale/bias parameters,
as those used in "Weight normalization" (abs/1602.07868). This does not
matter if combined with BN/GN/..., but it would matter if the convolution
was used standalone.
Author: Lucas Beyer
"""
def build(self, input_shape):
super(StandardizedConv2D, self).build(input_shape)
# Wrap a standardization around the conv OP.
default_conv_op = self._convolution_op
def standardized_conv_op(inputs, kernel):
# Kernel has shape HWIO, normalize over HWI
mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
# Author code uses std + 1e-5
return default_conv_op(inputs, (kernel - mean) / tf.sqrt(var + 1e-10))
self._convolution_op = standardized_conv_op
self.built = True
class BottleneckV2Unit(tf.keras.layers.Layer):
"""Implements a standard ResNet's unit (version 2).
"""
def __init__(self, num_filters, stride=1, **kwargs):
"""Initializer.
Args:
num_filters: number of filters in the bottleneck.
stride: specifies block's stride.
**kwargs: other tf.keras.layers.Layer keyword arguments.
"""
super(BottleneckV2Unit, self).__init__(**kwargs)
self._num_filters = num_filters
self._stride = stride
self._proj = None
self._unit_a = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
], name="a")
self._unit_a_conv = StandardizedConv2D(
filters=num_filters,
kernel_size=1,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="a/standardized_conv2d")
self._unit_b = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
PaddingFromKernelSize(kernel_size=3),
StandardizedConv2D(
filters=num_filters,
kernel_size=3,
strides=stride,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="standardized_conv2d")
], name="b")
self._unit_c = tf.keras.Sequential([
normalization.GroupNormalization(name="group_norm"),
ReLU(),
StandardizedConv2D(
filters=4 * num_filters,
kernel_size=1,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="standardized_conv2d")
], name="c")
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
# Add projection layer if necessary.
if (self._stride > 1) or (4 * self._num_filters != input_shape[-1]):
self._proj = StandardizedConv2D(
filters=4 * self._num_filters,
kernel_size=1,
strides=self._stride,
use_bias=False,
padding="VALID",
trainable=self.trainable,
name="a/proj/standardized_conv2d")
self.built = True
def compute_output_shape(self, input_shape):
current_shape = self._unit_a.compute_output_shape(input_shape)
current_shape = self._unit_a_conv.compute_output_shape(current_shape)
current_shape = self._unit_b.compute_output_shape(current_shape)
current_shape = self._unit_c.compute_output_shape(current_shape)
return current_shape
def call(self, x):
x_shortcut = x
# Unit "a".
x = self._unit_a(x)
if self._proj is not None:
x_shortcut = self._proj(x)
x = self._unit_a_conv(x)
# Unit "b".
x = self._unit_b(x)
# Unit "c".
x = self._unit_c(x)
return x + x_shortcut
class ResnetV2(tf.keras.Model):
"""Generic ResnetV2 architecture, as used in the BiT paper."""
def __init__(self,
num_units=(3, 4, 6, 3),
num_outputs=1000,
filters_factor=4,
strides=(1, 2, 2, 2),
**kwargs):
super(ResnetV2, self).__init__(**kwargs)
num_blocks = len(num_units)
num_filters = tuple(16 * filters_factor * 2**b for b in range(num_blocks))
self._root = self._create_root_block(num_filters=num_filters[0])
self._blocks = []
for b, (f, u, s) in enumerate(zip(num_filters, num_units, strides), 1):
n = "block{}".format(b)
self._blocks.append(
self._create_block(num_units=u, num_filters=f, stride=s, name=n))
self._pre_head = [
normalization.GroupNormalization(name="group_norm"),
ReLU(),
tf.keras.layers.GlobalAveragePooling2D()
]
self._head = None
if num_outputs:
self._head = tf.keras.layers.Dense(
units=num_outputs,
use_bias=True,
kernel_initializer="zeros",
trainable=self.trainable,
name="head/dense")
def _create_root_block(self,
num_filters,
conv_size=7,
conv_stride=2,
pool_size=3,
pool_stride=2):
layers = [
PaddingFromKernelSize(conv_size),
StandardizedConv2D(
filters=num_filters,
kernel_size=conv_size,
strides=conv_stride,
trainable=self.trainable,
use_bias=False,
name="standardized_conv2d"),
PaddingFromKernelSize(pool_size),
tf.keras.layers.MaxPool2D(
pool_size=pool_size, strides=pool_stride, padding="valid")
]
return tf.keras.Sequential(layers, name="root_block")
def _create_block(self, num_units, num_filters, stride, name):
layers = []
for i in range(1, num_units + 1):
layers.append(
BottleneckV2Unit(
num_filters=num_filters,
stride=(stride if i == 1 else 1),
name="unit%02d" % i))
return tf.keras.Sequential(layers, name=name)
def compute_output_shape(self, input_shape):
current_shape = self._root.compute_output_shape(input_shape)
for block in self._blocks:
current_shape = block.compute_output_shape(current_shape)
for layer in self._pre_head:
current_shape = layer.compute_output_shape(current_shape)
if self._head is not None:
batch_size, features = current_shape.as_list()
current_shape = (batch_size, 1, 1, features)
current_shape = self._head.compute_output_shape(current_shape).as_list()
current_shape = (current_shape[0], current_shape[3])
return tf.TensorShape(current_shape)
def call(self, x):
x = self._root(x)
for block in self._blocks:
x = block(x)
for layer in self._pre_head:
x = layer(x)
if self._head is not None:
x = self._head(x)
return x
KNOWN_MODELS = {
f'{bit}-R{l}x{w}': f'gs://bit_models/{bit}-R{l}x{w}.h5'
for bit in ['BiT-S', 'BiT-M']
for l, w in [(50, 1), (50, 3), (101, 1), (101, 3), (152, 4)]
}
NUM_UNITS = {
k: (3, 4, 6, 3) if 'R50' in k else
(3, 4, 23, 3) if 'R101' in k else
(3, 8, 36, 3)
for k in KNOWN_MODELS
}
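# Shape-only smoke test (a sketch with freshly initialized weights, built the
# same way as in the training script below):
if __name__ == "__main__":
  model = ResnetV2(num_units=NUM_UNITS['BiT-M-R50x1'], num_outputs=10)
  model.build((None, None, None, 3))
  print(model(tf.zeros([1, 128, 128, 3])).shape)  # (1, 10)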
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding: utf-8
from functools import partial
import time
import os
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import bit_common
import bit_hyperrule
import bit_tf2.models as models
import input_pipeline_tf2_or_jax as input_pipeline
def reshape_for_keras(features, batch_size, crop_size):
features["image"] = tf.reshape(features["image"], (batch_size, crop_size, crop_size, 3))
features["label"] = tf.reshape(features["label"], (batch_size, -1))
return (features["image"], features["label"])
class BiTLRSched(tf.keras.callbacks.Callback):
def __init__(self, base_lr, num_samples):
self.step = 0
self.base_lr = base_lr
self.num_samples = num_samples
def on_train_batch_begin(self, batch, logs=None):
lr = bit_hyperrule.get_lr(self.step, self.num_samples, self.base_lr)
tf.keras.backend.set_value(self.model.optimizer.lr, lr)
self.step += 1
def main(args):
tf.io.gfile.makedirs(args.logdir)
logger = bit_common.setup_logger(args)
logger.info(f'Available devices: {tf.config.list_physical_devices()}')
tf.io.gfile.makedirs(args.bit_pretrained_dir)
bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.h5')
if not tf.io.gfile.exists(bit_model_file):
model_url = models.KNOWN_MODELS[args.model]
logger.info(f'Downloading the model from {model_url}...')
tf.io.gfile.copy(model_url, bit_model_file)
# Set up input pipeline
dataset_info = input_pipeline.get_dataset_info(
args.dataset, 'train', args.examples_per_class)
# Distribute training
strategy = tf.distribute.MirroredStrategy()
num_devices = strategy.num_replicas_in_sync
print('Number of devices: {}'.format(num_devices))
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(args.dataset)
data_train = input_pipeline.get_data(
dataset=args.dataset, mode='train',
repeats=None, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=args.examples_per_class,
examples_per_class_seed=args.examples_per_class_seed,
mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_test = input_pipeline.get_data(
dataset=args.dataset, mode='test',
repeats=1, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=1, examples_per_class_seed=0,
mixup_alpha=None,
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_train = data_train.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
data_test = data_test.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
with strategy.scope():
filters_factor = int(args.model[-1])*4
model = models.ResnetV2(
num_units=models.NUM_UNITS[args.model],
num_outputs=21843,
filters_factor=filters_factor,
name="resnet",
trainable=True,
dtype=tf.float32)
model.build((None, None, None, 3))
logger.info(f'Loading weights...')
model.load_weights(bit_model_file)
logger.info(f'Weights loaded into model!')
model._head = tf.keras.layers.Dense(
units=dataset_info['num_classes'],
use_bias=True,
kernel_initializer="zeros",
trainable=True,
name="head/dense")
lr_supports = bit_hyperrule.get_schedule(dataset_info['num_examples'])
schedule_length = lr_supports[-1]
# NOTE: Let's not do that unless verified necessary and we do the same
# across all three codebases.
# schedule_length = schedule_length * 512 / args.batch
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
logger.info(f'Fine-tuning the model...')
steps_per_epoch = args.eval_every or schedule_length
history = model.fit(
data_train,
steps_per_epoch=steps_per_epoch,
epochs=schedule_length // steps_per_epoch,
validation_data=data_test, # here we are only using
# this data to evaluate our performance
callbacks=[BiTLRSched(args.base_lr, dataset_info['num_examples'])],
)
for epoch, accu in enumerate(history.history['val_accuracy']):
logger.info(
f'Step: {epoch * args.eval_every}, '
f'Test accuracy: {accu:0.3f}')
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--tfds_manual_dir", default=None,
help="Path to maually downloaded dataset.")
parser.add_argument("--batch_eval", default=32, type=int,
help="Eval batch size.")
main(parser.parse_args())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group normalization."""
import tensorflow.compat.v2 as tf
def group_normalize(x, gamma, beta, num_groups=None, group_size=None, eps=1e-5):
"""Applies group-normalization to NHWC `x` (see abs/1803.08494, go/dune-gn).
This function just does the math, if you want a "layer" that creates the
necessary variables etc., see `group_norm` below.
You must either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
Args:
x: N..C-tensor, the input to group-normalize. For images, this would be a
NHWC-tensor, for time-series a NTC, for videos a NHWTC or NTHWC, all of
them work, as normalization includes everything between N and C. Even just
NC shape works, as C is grouped and normalized.
gamma: tensor with C entries, learnable scale after normalization.
beta: tensor with C entries, learnable bias after normalization.
num_groups: int, number of groups to normalize over (divides C).
group_size: int, size of the groups to normalize over (divides C).
eps: float, a small additive constant to avoid /sqrt(0).
Returns:
Group-normalized `x`, of the same shape and type as `x`.
Author: Lucas Beyer
"""
assert x.shape.ndims >= 2, (
"Less than 2-dim Tensor passed to GroupNorm. Something's fishy.")
num_channels = x.shape[-1]
assert num_channels is not None, "Cannot apply GroupNorm on dynamic channels."
assert (num_groups is None) != (group_size is None), (
"You must specify exactly one of `num_groups`, `group_size`")
if group_size is not None:
num_groups = num_channels // group_size
assert num_channels % num_groups == 0, (
"GroupNorm: {} not divisible by {}".format(num_channels, num_groups))
orig_shape = tf.shape(x)
# This shape is NHWGS where G is #groups and S is group-size.
extra_shape = [num_groups, num_channels // num_groups]
group_shape = tf.concat([orig_shape[:-1], extra_shape], axis=-1)
x = tf.reshape(x, group_shape)
# The dimensions to normalize over: HWS for images, but more generally all
# dimensions except N (batch, first) and G (cross-groups, next-to-last).
# So more visually, normdims are the dots in N......G. (note the last one is
# also a dot, not a full-stop, argh!)
normdims = list(range(1, x.shape.ndims - 2)) + [x.shape.ndims - 1]
mean, var = tf.nn.moments(x, normdims, keepdims=True)
# Interestingly, we don't have a beta/gamma per group, but still one per
# channel, at least according to the original paper. Reshape such that they
# broadcast correctly.
beta = tf.reshape(beta, extra_shape)
gamma = tf.reshape(gamma, extra_shape)
x = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
return tf.reshape(x, orig_shape)
class GroupNormalization(tf.keras.layers.Layer):
"""A group-norm "layer" (see abs/1803.08494 go/dune-gn).
This function creates beta/gamma variables in a name_scope, and uses them to
apply `group_normalize` on the input `x`.
You can either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
If you specify neither, the paper's recommended `num_groups=32` is used.
Authors: Lucas Beyer, Joan Puigcerver.
"""
def __init__(self,
num_groups=None,
group_size=None,
eps=1e-5,
beta_init=tf.zeros_initializer(),
gamma_init=tf.ones_initializer(),
**kwargs):
"""Initializer.
Args:
num_groups: int, the number of channel-groups to normalize over.
group_size: int, size of the groups to normalize over.
eps: float, a small additive constant to avoid /sqrt(0).
beta_init: initializer for bias, defaults to zeros.
gamma_init: initializer for scale, defaults to ones.
**kwargs: other tf.keras.layers.Layer arguments.
"""
super(GroupNormalization, self).__init__(**kwargs)
if num_groups is None and group_size is None:
num_groups = 32
self._num_groups = num_groups
self._group_size = group_size
self._eps = eps
self._beta_init = beta_init
self._gamma_init = gamma_init
def build(self, input_size):
channels = input_size[-1]
assert channels is not None, "Cannot apply GN on dynamic channels."
self._gamma = self.add_weight(
name="gamma", shape=(channels,), initializer=self._gamma_init,
dtype=self.dtype)
self._beta = self.add_weight(
name="beta", shape=(channels,), initializer=self._beta_init,
dtype=self.dtype)
super(GroupNormalization, self).build(input_size)
def call(self, x):
return group_normalize(x, self._gamma, self._beta, self._num_groups,
self._group_size, self._eps)
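# Minimal smoke test (a sketch): the paper-default 32 groups over 64 channels.
if __name__ == "__main__":
  layer = GroupNormalization()
  y = layer(tf.random.normal([2, 8, 8, 64]))
  print(y.shape)  # (2, 8, 8, 64)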
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reference implementation of AugMix's data augmentation method in numpy."""
import augmentations
import numpy as np
from PIL import Image
# CIFAR-10 constants
MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.2010]
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
image = image.transpose(2, 0, 1) # Switch to channel-first
mean, std = np.array(MEAN), np.array(STD)
image = (image - mean[:, None, None]) / std[:, None, None]
return image.transpose(1, 2, 0)
def apply_op(image, op, severity):
image = np.clip(image * 255., 0, 255).astype(np.uint8)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img) / 255.
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
severity: Severity of underlying augmentation operators (between 1 and 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.float32(
np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image)
for i in range(width):
image_aug = image.copy()
d = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(d):
op = np.random.choice(augmentations.augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * normalize(image) + m * mix
return mixed
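# Demo (illustrative, not part of the original file): run AugMix on a random
# CIFAR-sized image. Relies on the `augmentations` module imported above,
# since augment_and_mix samples its ops from there.
if __name__ == '__main__':
  rgb = np.random.uniform(size=(32, 32, 3)).astype(np.float32)
  mixed = augment_and_mix(rgb, severity=3, width=3, depth=-1, alpha=1.)
  print(mixed.shape)  # (32, 32, 3)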
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on ImageNet.
Currently only supports ResNet-50 training.
Example usage:
`python imagenet.py <path/to/ImageNet> <path/to/ImageNet-C>`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import models
from torchvision import transforms
augmentations.IMAGE_SIZE = 224
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__') and
callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Trains an ImageNet Classifier')
parser.add_argument(
'clean_data', metavar='DIR', help='path to clean ImageNet dataset')
parser.add_argument(
'corrupted_data', metavar='DIR_C', help='path to ImageNet-C dataset')
parser.add_argument(
'--model',
'-m',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=90, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=256, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0001,
help='Weight decay (L2 penalty).')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=1,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--aug-prob-coeff',
default=1.,
type=float,
help='Probability distribution coefficients')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=10,
help='Training loss print frequency (batches).')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
# Raw AlexNet errors taken from https://github.com/hendrycks/robustness
ALEXNET_ERR = [
0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,
0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,
0.606500
]
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs."""
b = args.batch_size / 256.
k = args.epochs // 3
if epoch < k:
m = 1
elif epoch < 2 * k:
m = 0.1
else:
m = 0.01
lr = args.learning_rate * m * b
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def compute_mce(corruption_accs):
"""Compute mCE (mean Corruption Error) normalized by AlexNet performance."""
mce = 0.
for i in range(len(CORRUPTIONS)):
avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])
ce = 100 * avg_err / ALEXNET_ERR[i]
mce += ce / 15
return mce
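# Tiny worked example (made-up numbers, not from the paper): a uniform 60%
# accuracy gives avg_err = 0.4 for every corruption, so each CE term is
# 100 * 0.4 / ALEXNET_ERR[i] and the mCE is the mean of the 15 terms.
def _mce_sanity_check():
  dummy_accs = {c: [0.6] * 5 for c in CORRUPTIONS}
  return compute_mce(dummy_accs)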
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(
np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width))
m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
def train(net, train_loader, optimizer):
"""Train for one epoch."""
net.train()
data_ema = 0.
batch_ema = 0.
loss_ema = 0.
acc1_ema = 0.
acc5_ema = 0.
end = time.time()
for i, (images, targets) in enumerate(train_loader):
# Compute data loading time
data_time = time.time() - end
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
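# The mean KL from each distribution to the clamped mixture approximates
# the Jensen-Shannon divergence JS(p_clean, p_aug1, p_aug2); 12 is this
# script's consistency-loss weight.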
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
acc1, acc5 = accuracy(logits_clean, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
loss.backward()
optimizer.step()
# Compute batch computation time and update moving averages.
batch_time = time.time() - end
end = time.time()
data_ema = data_ema * 0.1 + float(data_time) * 0.9
batch_ema = batch_ema * 0.1 + float(batch_time) * 0.9
loss_ema = loss_ema * 0.1 + float(loss) * 0.9
acc1_ema = acc1_ema * 0.1 + float(acc1) * 0.9
acc5_ema = acc5_ema * 0.1 + float(acc5) * 0.9
if i % args.print_freq == 0:
print(
'Batch {}/{}: Data Time {:.3f} | Batch Time {:.3f} | Train Loss {:.3f} | Train Acc1 '
'{:.3f} | Train Acc5 {:.3f}'.format(i, len(train_loader), data_ema,
batch_ema, loss_ema, acc1_ema,
acc5_ema))
return loss_ema, acc1_ema, batch_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_transform):
"""Evaluate network on given corrupted dataset."""
corruption_accs = {}
for c in CORRUPTIONS:
print(c)
for s in range(1, 6):
valdir = os.path.join(args.corrupted_data, c, str(s))
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
loss, acc1 = test(net, val_loader)
if c in corruption_accs:
corruption_accs[c].append(acc1)
else:
corruption_accs[c] = [acc1]
print('\ts={}: Test Loss {:.3f} | Test Acc1 {:.3f}'.format(
s, loss, 100. * acc1))
return corruption_accs
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose(
[transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip()])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
preprocess,
])
traindir = os.path.join(args.clean_data, 'train')
valdir = os.path.join(args.clean_data, 'val')
train_dataset = datasets.ImageFolder(traindir, train_transform)
train_dataset = AugMixDataset(train_dataset, preprocess)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers)
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.model))
net = models.__dict__[args.model](pretrained=True)
else:
print("=> creating model '{}'".format(args.model))
net = models.__dict__[args.model]()
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
best_acc1 = 0  # may be replaced below when resuming from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc1 = checkpoint['best_acc1']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
test_loss, test_acc1 = test(net, val_loader)
print('Clean\n\tTest Loss {:.3f} | Test Acc1 {:.3f}'.format(
test_loss, 100 * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
print('\t'.join([c] + list(map(str, corruption_accs[c]))))
print('mCE (normalized by AlexNet): ', compute_mce(corruption_accs))
return
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
'imagenet_{}_training_log.csv'.format(args.model))
with open(log_path, 'w') as f:
f.write(
'epoch,batch_time,train_loss,train_acc1(%),test_loss,test_acc1(%)\n')
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
train_loss_ema, train_acc1_ema, batch_ema = train(net, train_loader,
optimizer)
test_loss, test_acc1 = test(net, val_loader)
is_best = test_acc1 > best_acc1
best_acc1 = max(test_acc1, best_acc1)
checkpoint = {
'epoch': epoch,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%0.3f,%0.6f,%0.2f,%0.5f,%0.2f\n' % (
(epoch + 1),
batch_ema,
train_loss_ema,
100. * train_acc1_ema,
test_loss,
100. * test_acc1,
))
print(
'Epoch {:3d} | Train Loss {:.4f} | Test Loss {:.3f} | Test Acc1 '
'{:.2f}'
.format((epoch + 1), train_loss_ema, test_loss, 100. * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
print('\t'.join(map(str, [c] + corruption_accs[c])))
print('mCE (normalized by AlexNet):', compute_mce(corruption_accs))
if __name__ == '__main__':
main()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on CIFAR-10/100.
Supports WideResNet, AllConv, ResNeXt models on CIFAR-10 and CIFAR-100 as well
as evaluation on CIFAR-10-C and CIFAR-100-C.
Example usage:
`python cifar.py`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
from models.cifar.allconv import AllConvNet
import numpy as np
from third_party.ResNeXt_DenseNet.models.densenet import densenet
from third_party.ResNeXt_DenseNet.models.resnext import resnext29
from third_party.WideResNet_pytorch.wideresnet import WideResNet
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
parser = argparse.ArgumentParser(
description='Trains a CIFAR Classifier',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
type=str,
default='cifar10',
choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument(
'--model',
'-m',
type=str,
default='wrn',
choices=['wrn', 'allconv', 'densenet', 'resnext'],
help='Choose architecture.')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=100, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0005,
help='Weight decay (L2 penalty).')
# WRN Architecture options
parser.add_argument(
'--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='Widen factor')
parser.add_argument(
'--droprate', default=0.0, type=float, help='Dropout probability')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=3,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=50,
help='Training loss print frequency (batches).')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
def get_lr(step, total_steps, lr_max, lr_min):
"""Compute learning rate according to cosine annealing schedule."""
return lr_min + (lr_max - lr_min) * 0.5 * (1 +
np.cos(step / total_steps * np.pi))
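# Sanity check (illustrative, not part of the original script): the cosine
# schedule starts at lr_max (step 0) and decays to lr_min (final step).
def _get_lr_sanity_check():
  assert abs(get_lr(0, 1000, 0.1, 1e-6) - 0.1) < 1e-9
  assert abs(get_lr(1000, 1000, 0.1, 1e-6) - 1e-6) < 1e-9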
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(np.random.dirichlet([1] * args.mixture_width))
m = np.float32(np.random.beta(1, 1))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
def train(net, train_loader, optimizer, scheduler):
"""Train for one epoch."""
net.train()
loss_ema = 0.
for i, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
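# The mean KL from each distribution to the clamped mixture approximates
# the Jensen-Shannon divergence JS(p_clean, p_aug1, p_aug2); 12 is this
# script's consistency-loss weight.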
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
loss.backward()
optimizer.step()
scheduler.step()
loss_ema = loss_ema * 0.9 + float(loss) * 0.1
if i % args.print_freq == 0:
print('Train Loss {:.3f}'.format(loss_ema))
return loss_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_data, base_path):
"""Evaluate network on given corrupted dataset."""
corruption_accs = []
for corruption in CORRUPTIONS:
# Overwrite the dataset's underlying arrays in place with the corrupted data
test_data.data = np.load(base_path + corruption + '.npy')
test_data.targets = torch.LongTensor(np.load(base_path + 'labels.npy'))
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
test_loss, test_acc = test(net, test_loader)
corruption_accs.append(test_acc)
print('{}\n\tTest Loss {:.3f} | Test Error {:.3f}'.format(
corruption, test_loss, 100 - 100. * test_acc))
return np.mean(corruption_accs)
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4)])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)])
test_transform = preprocess
if args.dataset == 'cifar10':
train_data = datasets.CIFAR10(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR10(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-10-C/'
num_classes = 10
else:
train_data = datasets.CIFAR100(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR100(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-100-C/'
num_classes = 100
train_data = AugMixDataset(train_data, preprocess, args.no_jsd)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
# Create model
if args.model == 'densenet':
net = densenet(num_classes=num_classes)
elif args.model == 'wrn':
net = WideResNet(args.layers, num_classes, args.widen_factor, args.droprate)
elif args.model == 'allconv':
net = AllConvNet(num_classes)
elif args.model == 'resnext':
net = resnext29(num_classes=num_classes)
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay,
nesterov=True)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
best_acc = 0  # may be replaced below when resuming from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc = checkpoint['best_acc']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
# Evaluate clean accuracy first because test_c mutates underlying data
test_loss, test_acc = test(net, test_loader)
print('Clean\n\tTest Loss {:.3f} | Test Error {:.2f}'.format(
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
return
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda
step,
args.epochs * len(train_loader),
1, # lr_lambda computes multiplicative factor
1e-6 / args.learning_rate))
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
args.dataset + '_' + args.model + '_training_log.csv')
with open(log_path, 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
begin_time = time.time()
train_loss_ema = train(net, train_loader, optimizer, scheduler)
test_loss, test_acc = test(net, test_loader)
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
checkpoint = {
'epoch': epoch,
'dataset': args.dataset,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_time,
train_loss_ema,
test_loss,
100 - 100. * test_acc,
))
print(
'Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} |'
' Test Error {4:.2f}'
.format((epoch + 1), int(time.time() - begin_time), train_loss_ema,
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' %
(args.epochs + 1, 0, 0, 0, 100 - 100 * test_c_acc))
if __name__ == '__main__':
main()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base augmentations operators."""
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
# ImageNet code should change this value
IMAGE_SIZE = 32
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
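# Demo (illustrative, not part of the original module): apply a random 3-op
# chain at severity 3 to a blank CIFAR-sized image, mirroring how AugMix
# samples ops from the `augmentations` list above.
if __name__ == '__main__':
  img = Image.new('RGB', (IMAGE_SIZE, IMAGE_SIZE))
  for op in np.random.choice(augmentations, size=3):
    img = op(img, 3)
  print(img.size)  # (32, 32)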
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AllConv implementation (https://arxiv.org/abs/1412.6806)."""
import math
import torch
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
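# Note: GELU above is the sigmoid approximation x * sigmoid(1.702 * x)
# (Hendrycks & Gimpel, 2016). For reference, an exact erf-based form is
# sketched below (illustrative; unused by the model):
def _gelu_exact(x):
  return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))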
def make_layers(cfg):
"""Create a single layer."""
layers = []
in_channels = 3
for v in cfg:
if v == 'Md':
layers += [nn.MaxPool2d(kernel_size=2, stride=2), nn.Dropout(p=0.5)]
elif v == 'A':
layers += [nn.AvgPool2d(kernel_size=8)]
elif v == 'NIN':
conv2d = nn.Conv2d(in_channels, in_channels, kernel_size=1, padding=1)
layers += [conv2d, nn.BatchNorm2d(in_channels), GELU()]
elif v == 'nopad':
conv2d = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=0)
layers += [conv2d, nn.BatchNorm2d(in_channels), GELU()]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.BatchNorm2d(v), GELU()]
in_channels = v
return nn.Sequential(*layers)
class AllConvNet(nn.Module):
"""AllConvNet main class."""
def __init__(self, num_classes):
super(AllConvNet, self).__init__()
self.num_classes = num_classes
self.width1, w1 = 96, 96
self.width2, w2 = 192, 192
self.features = make_layers(
[w1, w1, w1, 'Md', w2, w2, w2, 'Md', 'nopad', 'NIN', 'NIN', 'A'])
self.classifier = nn.Linear(self.width2, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n)) # He initialization
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
|
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
"""Basic ResNet block."""
def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = drop_rate
self.is_in_equal_out = (in_planes == out_planes)
self.conv_shortcut = None if self.is_in_equal_out else nn.Conv2d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
padding=0,
bias=False)
def forward(self, x):
if not self.is_in_equal_out:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.is_in_equal_out:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.drop_rate > 0:
out = F.dropout(out, p=self.drop_rate, training=self.training)
out = self.conv2(out)
if not self.is_in_equal_out:
return torch.add(self.conv_shortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
"""Layer container for blocks."""
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
drop_rate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, drop_rate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
drop_rate):
layers = []
for i in range(nb_layers):
layers.append(
block(in_planes if i == 0 else out_planes, out_planes,
stride if i == 0 else 1, drop_rate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
"""WideResNet class."""
def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
super(WideResNet, self).__init__()
n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(
3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
drop_rate)
# 2nd block
self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
drop_rate)
# 3rd block
self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
drop_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(n_channels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(n_channels[3], num_classes)
self.n_channels = n_channels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.n_channels)
return self.fc(out)
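# Smoke test (illustrative, not part of the original file): WRN-40-2, the
# CIFAR script's default configuration, on a random CIFAR-sized batch.
if __name__ == '__main__':
  net = WideResNet(depth=40, num_classes=10, widen_factor=2)
  out = net(torch.randn(2, 3, 32, 32))
  print(out.shape)  # torch.Size([2, 10])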
|
"""DenseNet implementation (https://arxiv.org/abs/1608.06993)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
"""Bottleneck block for DenseNet."""
def __init__(self, n_channels, growth_rate):
super(Bottleneck, self).__init__()
inter_channels = 4 * growth_rate
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, inter_channels, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(inter_channels)
self.conv2 = nn.Conv2d(
inter_channels, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat((x, out), 1)
return out
class SingleLayer(nn.Module):
"""Layer container for blocks."""
def __init__(self, n_channels, growth_rate):
super(SingleLayer, self).__init__()
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
"""Transition block."""
def __init__(self, n_channels, n_out_channels):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(n_channels)
self.conv1 = nn.Conv2d(
n_channels, n_out_channels, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
"""DenseNet main class."""
def __init__(self, growth_rate, depth, reduction, n_classes, bottleneck):
super(DenseNet, self).__init__()
if bottleneck:
n_dense_blocks = int((depth - 4) / 6)
else:
n_dense_blocks = int((depth - 4) / 3)
n_channels = 2 * growth_rate
self.conv1 = nn.Conv2d(3, n_channels, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
n_out_channels = int(math.floor(n_channels * reduction))
self.trans1 = Transition(n_channels, n_out_channels)
n_channels = n_out_channels
self.dense2 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
n_out_channels = int(math.floor(n_channels * reduction))
self.trans2 = Transition(n_channels, n_out_channels)
n_channels = n_out_channels
self.dense3 = self._make_dense(n_channels, growth_rate, n_dense_blocks,
bottleneck)
n_channels += n_dense_blocks * growth_rate
self.bn1 = nn.BatchNorm2d(n_channels)
self.fc = nn.Linear(n_channels, n_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_dense(self, n_channels, growth_rate, n_dense_blocks, bottleneck):
layers = []
for _ in range(int(n_dense_blocks)):
if bottleneck:
layers.append(Bottleneck(n_channels, growth_rate))
else:
layers.append(SingleLayer(n_channels, growth_rate))
n_channels += growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.dense3(out)
out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
out = self.fc(out)
return out
def densenet(growth_rate=12, depth=40, num_classes=10):
model = DenseNet(growth_rate, depth, 1., num_classes, False)
return model
|
"""ResNeXt implementation (https://arxiv.org/abs/1611.05431)."""
import math
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
class ResNeXtBottleneck(nn.Module):
"""ResNeXt Bottleneck Block type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)."""
expansion = 4
def __init__(self,
inplanes,
planes,
cardinality,
base_width,
stride=1,
downsample=None):
super(ResNeXtBottleneck, self).__init__()
dim = int(math.floor(planes * (base_width / 64.0)))
self.conv_reduce = nn.Conv2d(
inplanes,
dim * cardinality,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn_reduce = nn.BatchNorm2d(dim * cardinality)
self.conv_conv = nn.Conv2d(
dim * cardinality,
dim * cardinality,
kernel_size=3,
stride=stride,
padding=1,
groups=cardinality,
bias=False)
self.bn = nn.BatchNorm2d(dim * cardinality)
self.conv_expand = nn.Conv2d(
dim * cardinality,
planes * 4,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn_expand = nn.BatchNorm2d(planes * 4)
self.downsample = downsample
def forward(self, x):
residual = x
bottleneck = self.conv_reduce(x)
bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)
bottleneck = self.conv_conv(bottleneck)
bottleneck = F.relu(self.bn(bottleneck), inplace=True)
bottleneck = self.conv_expand(bottleneck)
bottleneck = self.bn_expand(bottleneck)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""ResNext optimized for the Cifar dataset, as specified in https://arxiv.org/pdf/1611.05431.pdf."""
def __init__(self, block, depth, cardinality, base_width, num_classes):
super(CifarResNeXt, self).__init__()
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 9 == 0, 'depth should be one of 29, 38, 47, 56, 101'
layer_blocks = (depth - 2) // 9
self.cardinality = cardinality
self.base_width = base_width
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.inplanes = 64
self.stage_1 = self._make_layer(block, 64, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 128, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 256, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)  # in-place variant; `kaiming_normal` is deprecated
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, self.cardinality, self.base_width, stride,
downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes, planes, self.cardinality, self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnext29(num_classes=10, cardinality=4, base_width=32):
model = CifarResNeXt(ResNeXtBottleneck, 29, cardinality, base_width,
num_classes)
return model
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cascades API."""
# A new PyPI release will be pushed every time `__version__` is increased
# When changing this, also update the CHANGELOG.md
__version__ = '0.3.2'
from cascades._src.distributions.base import UniformCategorical
from cascades._src.distributions.gpt import GPT
from cascades._src.distributions.choose import Choose
from cascades._src.distributions.strings import get_default_lm
from cascades._src.distributions.strings import mock_lm
from cascades._src.distributions.strings import set_default_lm
from cascades._src.distributions.strings import String
from cascades._src.handlers import factor
from cascades._src.handlers import log
from cascades._src.handlers import observe
from cascades._src.handlers import param
from cascades._src.handlers import reject
from cascades._src.handlers import rejection_sample
from cascades._src.handlers import sample
from cascades._src.inference import model
from cascades._src.interpreter import Interpreter
|
# Copyright 2023 The cascades Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for handlers."""
import math
from absl.testing import absltest
from cascades._src import handlers
import jax.numpy as jnp
from numpyro import distributions as np_dists
def _binomial(k, p=0.5):
total = 0
for _ in range(k):
flip = yield handlers.sample(np_dists.Bernoulli(probs=p))
total += flip
return total
def _flip_paths(n, a, b):
nheads = yield _binomial(n)
if int(nheads) != a:
yield handlers.reject(reason=f'nheads {nheads} != {a}')
nheads = yield _binomial(n)
if int(nheads) != b:
yield handlers.reject(reason=f'nheads {nheads} != {b}')
def _gaussian_mixture(locs):
"""Standard gaussian mixture model."""
n = len(locs)
mixing_dist = np_dists.Categorical(probs=jnp.ones(n) / n)
component_dist = np_dists.Normal(loc=jnp.array(locs), scale=jnp.ones(n))
mixture = np_dists.MixtureSameFamily(mixing_dist, component_dist)
return mixture
def gaussian_mixture_likelihood(proposal_loc=0.0,
proposal_scale=3.0,
mixture_locs=(-5.0, 5.0)):
"""Demonstrate proposing & scoring in same program."""
# Proposal distribution
proposal = yield handlers.sample(
name='proposal',
dist=np_dists.Normal(loc=proposal_loc, scale=proposal_scale))
mixture = _gaussian_mixture(mixture_locs)
# Add term to likelihood
yield handlers.sample(name='score', dist=mixture, obs=proposal)
return proposal
class SimpleTests(absltest.TestCase):
def test_likelihood_weighting(self):
"""Sample from normal, and weight using mixture."""
locs = [-5.0, 5.0]
def _fn(verbose=False):
del verbose
return gaussian_mixture_likelihood(mixture_locs=locs)
mixture = _gaussian_mixture(locs)
forward_sample_handler, result = handlers.forward_sample(fn=_fn, seed=0)
expected_score = mixture.log_prob(result['return_value'])
self.assertAlmostEqual(expected_score, result['observed_likelihood'])
self.assertEqual(forward_sample_handler.result, result['return_value'])
def test_paths_rejection_samping(self):
fn = lambda: _flip_paths(3, 1, 2)
fn = handlers.AutoName(fn) # Uniquely name each sample.
_, result = handlers.rejection_sample(fn=fn, seed=0, max_attempts=100)
effects = result['intermediates']
nheads = sum(eff.value for eff in effects)
self.assertEqual(3, int(nheads))
class RejectTest(absltest.TestCase):
def test_reject(self):
def _reject_test():
yield handlers.log('log1', 'Log 1')
yield handlers.reject(reason='rejected for no reason')
yield handlers.log('log2', 'Log 2')
_, result = handlers.forward_sample(fn=_reject_test, seed=0)
self.assertTrue(math.isinf(result['observed_likelihood']))
self.assertLess(result['observed_likelihood'], 0)
self.assertIsInstance(result['intermediates'][-1], handlers.Reject)
self.assertLen(result['intermediates'], 2)
if __name__ == '__main__':
absltest.main()
|