python_code | repo_name | file_path
---|---|---|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module setuptools script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
description = """A synthetic dataset of school-level mathematics questions.
This dataset code generates mathematical question and answer pairs, from a range
of question types (such as arithmetic, algebra, probability, etc.), at roughly
school-level difficulty. This is designed to test the mathematical learning and
reasoning skills of learning models.
Original paper: Analysing Mathematical Reasoning Abilities of Neural Models
(Saxton, Grefenstette, Hill, Kohli) (https://openreview.net/pdf?id=H1gR5iR5FX).
"""
setup(
name='mathematics_dataset',
version='1.0.1',
description='A synthetic dataset of school-level mathematics questions',
long_description=description,
author='DeepMind',
author_email='[email protected]',
license='Apache License, Version 2.0',
keywords='mathematics dataset',
url='https://github.com/deepmind/mathematics_dataset',
packages=find_packages(),
install_requires=[
'absl-py>=0.1.0',
'numpy>=1.10',
'six',
'sympy>=1.2',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| mathematics_dataset-master | setup.py |
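As a quick orientation (not part of the repository), the sketch below shows one way to install the package defined by this setup.py and then invoke the dataset writer that appears later in this dump. The output path is a placeholder.

# A rough sketch; `pip install .` assumes the current directory is the repo root,
# and the output directory is an arbitrary placeholder.
import subprocess

subprocess.check_call(['pip', 'install', '.'])
subprocess.check_call([
    'python', '-m', 'mathematics_dataset.generate_to_file',
    '--output_dir=/tmp/mathematics_dataset',
])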
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of how to write generated questions to text files.
Given an output directory, this will create the following subdirectories:
* train-easy
* train-medium
* train-hard
* interpolate
* extrapolate
and populate each of these directories with a text file for each module,
where the text file contains lines alternating between the question and the
answer.
Passing --train_split=False will create a single output directory 'train' for
training data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl import app
from absl import flags
from absl import logging
from mathematics_dataset import generate
import six
from six.moves import range
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', None, 'Where to write output text')
flags.DEFINE_boolean('train_split', True,
'Whether to split training data by difficulty')
flags.mark_flag_as_required('output_dir')
def main(unused_argv):
generate.init_modules(FLAGS.train_split)
output_dir = os.path.expanduser(FLAGS.output_dir)
if os.path.exists(output_dir):
logging.fatal('output dir %s already exists', output_dir)
logging.info('Writing to %s', output_dir)
os.makedirs(output_dir)
for regime, flat_modules in six.iteritems(generate.filtered_modules):
regime_dir = os.path.join(output_dir, regime)
os.mkdir(regime_dir)
per_module = generate.counts[regime]
for module_name, module in six.iteritems(flat_modules):
path = os.path.join(regime_dir, module_name + '.txt')
with open(path, 'w') as text_file:
for _ in range(per_module):
problem, _ = generate.sample_from_module(module)
text_file.write(str(problem.question) + '\n')
text_file.write(str(problem.answer) + '\n')
logging.info('Written %s', path)
if __name__ == '__main__':
app.run(main)
| mathematics_dataset-master | mathematics_dataset/generate_to_file.py |
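Because the files written above alternate between a question line and an answer line, reading one back is straightforward. The sketch below is illustrative only; the directory and module name are placeholders, not values from the repository.

# A minimal sketch of reading back one generated file.
path = '/tmp/mathematics_dataset/train/arithmetic__add_or_sub.txt'  # placeholder
with open(path) as text_file:
    lines = [line.rstrip('\n') for line in text_file]
# Lines alternate question, answer, question, answer, ...
pairs = list(zip(lines[0::2], lines[1::2]))
for question, answer in pairs[:3]:
    print(question, '->', answer)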
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.generate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from mathematics_dataset import generate
import six
from six.moves import range
class GenerateTest(parameterized.TestCase):
def testMakeEntropyFn(self):
entropy_full = generate._make_entropy_fn(0, 1)
self.assertEqual(entropy_full((2, 3)), (2, 3))
entropy_third = generate._make_entropy_fn(2, 3)
self.assertEqual(entropy_third((3, 6)), (5, 6))
@parameterized.parameters('train', 'interpolate', 'extrapolate')
def testGenerate(self, regime):
generate.init_modules()
for module in six.itervalues(generate.filtered_modules[regime]):
for _ in range(3):
question = module()
str(question)
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/generate_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prints to stdout different curriculum questions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import textwrap
# Dependency imports
from absl import app
from absl import flags
from absl import logging
from mathematics_dataset import generate_settings
from mathematics_dataset.modules import modules
import six
from six.moves import range
FLAGS = flags.FLAGS
flags.DEFINE_string('filter', '', 'restrict to matching module names')
flags.DEFINE_integer('per_train_module', 10, 'Num of examples per train module')
flags.DEFINE_integer('per_test_module', 10, 'Num of examples per test module')
flags.DEFINE_bool('show_dropped', False, 'Whether to print dropped questions')
filtered_modules = collections.OrderedDict([])
counts = {}
def _make_entropy_fn(level, num_levels):
"""This returns a function that returns a subrange of entropy.
E.g., if level=1 (medium) and num_levels=3, then the returned function will
map the range [x, x + y] to [x + y/3, x + 2y/3].
Args:
level: Integer in range [0, num_levels - 1].
num_levels: Number of difficulty levels.
Returns:
Function to restrict entropy range.
"""
lower = level / num_levels
upper = (level + 1) / num_levels
def modify_entropy(range_):
assert len(range_) == 2
length = range_[1] - range_[0]
return (range_[0] + lower * length, range_[0] + upper * length)
return modify_entropy
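# For example (a worked case, mirroring generate_test.py): _make_entropy_fn(1, 3)
# maps the range (3, 6) to (4, 5), i.e., restricts it to the middle third.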
def _filter_and_flatten(modules_):
"""Returns flattened dict, filtered according to FLAGS."""
flat = collections.OrderedDict()
def add(submodules, prefix=None):
for key, module_or_function in six.iteritems(submodules):
full_name = prefix + '__' + key if prefix is not None else key
if isinstance(module_or_function, dict):
add(module_or_function, full_name)
else:
if FLAGS.filter not in full_name:
continue
flat[full_name] = module_or_function
add(modules_)
# Make sure the list of modules is in deterministic order. This is important when
# generating across multiple machines.
flat = collections.OrderedDict(
[(key, flat[key]) for key in sorted(six.iterkeys(flat))])
return flat
def init_modules(train_split=False):
"""Inits the dicts containing functions for generating modules."""
if filtered_modules:
return # already initialized
all_modules = collections.OrderedDict([])
if train_split:
all_modules['train-easy'] = modules.train(_make_entropy_fn(0, 3))
all_modules['train-medium'] = modules.train(_make_entropy_fn(1, 3))
all_modules['train-hard'] = modules.train(_make_entropy_fn(2, 3))
else:
all_modules['train'] = modules.train(_make_entropy_fn(0, 1))
all_modules['interpolate'] = modules.test()
all_modules['extrapolate'] = modules.test_extra()
counts['train'] = FLAGS.per_train_module
counts['train-easy'] = FLAGS.per_train_module // 3
counts['train-medium'] = FLAGS.per_train_module // 3
counts['train-hard'] = FLAGS.per_train_module // 3
counts['interpolate'] = FLAGS.per_test_module
counts['extrapolate'] = FLAGS.per_test_module
for regime_, modules_ in six.iteritems(all_modules):
filtered_modules[regime_] = _filter_and_flatten(modules_)
def sample_from_module(module):
"""Samples a problem, ignoring samples with overly long questions / answers.
Args:
module: Callable returning a `Problem`.
Returns:
Pair `(problem, num_dropped)`, where `problem` is an instance of `Problem`
and `num_dropped` is an integer >= 0 indicating the number of samples that
were dropped.
"""
num_dropped = 0
while True:
problem = module()
question = str(problem.question)
if len(question) > generate_settings.MAX_QUESTION_LENGTH:
num_dropped += 1
if FLAGS.show_dropped:
logging.warning('Dropping question: %s', question)
continue
answer = str(problem.answer)
if len(answer) > generate_settings.MAX_ANSWER_LENGTH:
num_dropped += 1
if FLAGS.show_dropped:
logging.warning('Dropping question with answer: %s', answer)
continue
return problem, num_dropped
def main(unused_argv):
"""Prints Q&As from modules according to FLAGS.filter."""
init_modules()
text_wrapper = textwrap.TextWrapper(
width=80, initial_indent=' ', subsequent_indent=' ')
for regime, flat_modules in six.iteritems(filtered_modules):
per_module = counts[regime]
for module_name, module in six.iteritems(flat_modules):
# These magic print constants make the header bold.
print('\033[1m{}/{}\033[0m'.format(regime, module_name))
num_dropped = 0
for _ in range(per_module):
problem, extra_dropped = sample_from_module(module)
num_dropped += extra_dropped
text = text_wrapper.fill(
'{} \033[92m{}\033[0m'.format(problem.question, problem.answer))
print(text)
if num_dropped > 0:
logging.warning('Dropped %d examples', num_dropped)
if __name__ == '__main__':
app.run(main)
| mathematics_dataset-master | mathematics_dataset/generate.py |
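Besides the CLI `main` above, the module can be driven programmatically. The sketch below is an assumption-laden example, not repository code: the manual flag parsing and the module name `arithmetic__add_or_sub` are illustrative.

# A sketch of sampling one problem programmatically instead of via `app.run(main)`.
from absl import flags
from mathematics_dataset import generate

flags.FLAGS(['generate'])  # parse flag defaults without going through app.run
generate.init_modules()
module = generate.filtered_modules['train']['arithmetic__add_or_sub']  # illustrative name
problem, num_dropped = generate.sample_from_module(module)
print(problem.question)
print(problem.answer)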
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mathematics_dataset-master | mathematics_dataset/__init__.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Containers for "[example] problems" (i.e., question/answer) pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from mathematics_dataset.util import composition
def question(context, template, **kwargs):
"""Makes a question, using the given context and template.
The format is similar to that for python's `format` function, for example:
```
question(context, 'What is {} plus {p} over {q}?', 2, p=3, q=4)
```
The main difference between this and the standard python formatting is that
this understands `Entity`s in the arguments, and will do appropriate expansion
of text and prefixing of their descriptions.
Arguments:
context: Instance of `composition.Context`, for extracting entities needed
for describing the problem.
template: A string, like "Calculate the value of {exp}.".
**kwargs: A dictionary mapping arguments to values, e.g.,
`{'exp': sympy.Add(2, 3, evaluate=False)}`.
Returns:
String.
"""
assert isinstance(context, composition.Context)
assert isinstance(template, str)
prefix, kwargs = composition.expand_entities(context, **kwargs)
if prefix:
prefix += ' '
return prefix + template.format(**kwargs)
Problem = collections.namedtuple('Problem', ('question', 'answer'))
| mathematics_dataset-master | mathematics_dataset/example.py |
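Since `Problem` is a plain namedtuple, a question/answer pair can be constructed and consumed directly; the values below are hypothetical.

# A minimal sketch with hypothetical values showing the `Problem` container.
from mathematics_dataset import example

problem = example.Problem(question='What is 2 plus 3?', answer=5)
print(problem.question)  # -> What is 2 plus 3?
print(problem.answer)    # -> 5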
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
MAX_QUESTION_LENGTH = 160
MAX_ANSWER_LENGTH = 30
QUESTION_CHARS = (
['', ' '] + list(string.ascii_letters + string.digits + string.punctuation))
EMPTY_INDEX = QUESTION_CHARS.index('')
NUM_INDICES = len(QUESTION_CHARS)
CHAR_TO_INDEX = {char: index for index, char in enumerate(QUESTION_CHARS)}
INDEX_TO_CHAR = {index: char for index, char in enumerate(QUESTION_CHARS)}
| mathematics_dataset-master | mathematics_dataset/generate_settings.py |
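The character tables above support a simple fixed-length integer encoding of questions. The `encode`/`decode` helpers below are illustrative assumptions, not part of generate_settings.py.

# A sketch of fixed-length encoding using the tables above.
from mathematics_dataset import generate_settings as settings

def encode(question):
    indices = [settings.CHAR_TO_INDEX[char] for char in question]
    padding = [settings.EMPTY_INDEX] * (settings.MAX_QUESTION_LENGTH - len(indices))
    return indices + padding

def decode(indices):
    # EMPTY_INDEX maps back to the empty string, so padding disappears on join.
    return ''.join(settings.INDEX_TO_CHAR[index] for index in indices)

assert decode(encode('What is 2 + 3?')) == 'What is 2 + 3?'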
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.util.composition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.util import composition
import sympy
class FunctionHandleTest(absltest.TestCase):
def testApply(self):
handle = composition.FunctionHandle('f', 'g')
applied = handle.apply(*sympy.symbols('x y'))
self.assertEqual(str(applied), 'f(g(x, y))')
applied = handle.apply(sympy.symbols('x'))
self.assertEqual(str(applied), 'f(g(x))')
class ContextTest(absltest.TestCase):
def testPeel(self):
sample_args = composition.SampleArgs(4, 3.0)
entropy, new_sample_args = sample_args.peel()
self.assertAlmostEqual(entropy, 0.75)
self.assertEqual(new_sample_args.num_modules, 4)
self.assertAlmostEqual(new_sample_args.entropy, 2.25)
def testSplit(self):
sample_args = composition.SampleArgs(4, 5.0)
children = sample_args.split(2)
self.assertLen(children, 2)
self.assertEqual(sum([child.num_modules for child in children]), 3)
self.assertAlmostEqual(sum([child.entropy for child in children]), 5.0)
class EntityTest(absltest.TestCase):
def testInit_valueErrorIfSelfAndHandle(self):
with self.assertRaisesRegex(ValueError, 'Cannot specify handle'):
composition.Entity(context=composition.Context(),
value=0,
description='Something with {self}. ',
handle='additional')
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/util/composition_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mathematics_dataset-master | mathematics_dataset/util/__init__.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests mathematics_dataset.util.combinatorics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.util import combinatorics
class CombinatoricsTest(absltest.TestCase):
def testPositiveIntegersWithSum(self):
result = combinatorics.uniform_positive_integers_with_sum(1, 1)
self.assertEqual(result, [1])
result = combinatorics.uniform_positive_integers_with_sum(2, 2)
self.assertEqual(result, [1, 1])
result = combinatorics.uniform_positive_integers_with_sum(1, 10)
self.assertEqual(sum(result), 10)
result = combinatorics.uniform_positive_integers_with_sum(2, 10)
self.assertEqual(sum(result), 10)
result = combinatorics.uniform_positive_integers_with_sum(0, 0)
self.assertEqual(result, [])
def testNonNegativeIntegersWithSum(self):
result = combinatorics.uniform_non_negative_integers_with_sum(1, 0)
self.assertEqual(result, [0])
result = combinatorics.uniform_non_negative_integers_with_sum(2, 0)
self.assertEqual(result, [0, 0])
result = combinatorics.uniform_non_negative_integers_with_sum(3, 10)
self.assertEqual(sum(result), 10)
def testLogNumberBinaryTrees(self):
self.assertAlmostEqual(
combinatorics.log_number_binary_trees(0), math.log(1))
self.assertAlmostEqual(
combinatorics.log_number_binary_trees(1), math.log(1))
self.assertAlmostEqual(
combinatorics.log_number_binary_trees(2), math.log(2))
self.assertAlmostEqual(
combinatorics.log_number_binary_trees(3), math.log(5))
self.assertAlmostEqual(
combinatorics.log_number_binary_trees(4), math.log(14))
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/util/combinatorics_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for displaying expressions.
SymPy provides a lot of functionality for displaying expressions, but it is
slightly too centered on being a symbolic maths engine to cover all our needs.
For example, it cannot display an unsimplified fraction like 3/6, or a decimal
that is not internally represented as a float (and thus not subject to rounding).
This module also provides some other conveniences, such as converting numbers to
words and displaying properly formatted percentages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import decimal
# Dependency imports
import sympy
# For converting integers to words:
_INTEGER_LOW = [
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen'
]
_INTEGER_MID = [
'', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty',
'ninety'
]
_INTEGER_HIGH = [
(int(1e12), 'trillion'), (int(1e9), 'billion'), (int(1e6), 'million'),
(int(1e3), 'thousand'), (100, 'hundred')
]
# For converting rationals to words:
_SINGULAR_DENOMINATORS = [
'', '', 'half', 'third', 'quarter', 'fifth', 'sixth', 'seventh', 'eighth',
'ninth', 'tenth', 'eleventh', 'twelfth', 'thirteenth', 'fourteenth',
'fifteenth', 'sixteenth', 'seventeenth', 'eighteenth', 'nineteenth',
'twentieth'
]
_PLURAL_DENOMINATORS = [
'', '', 'halves', 'thirds', 'quarters', 'fifths', 'sixths', 'sevenths',
'eighths', 'ninths', 'tenths', 'elevenths', 'twelfths', 'thirteenths',
'fourteenths', 'fifteenths', 'sixteenths', 'seventeenths', 'eighteenths',
'nineteenths', 'twentieths'
]
# For converting ordinals to words:
_ORDINALS = [
'zeroth', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh',
'eighth', 'ninth', 'tenth', 'eleventh', 'twelfth', 'thirteenth',
'fourteenth', 'fifteenth', 'sixteenth', 'seventeenth', 'eighteenth',
'nineteenth', 'twentieth'
]
class Decimal(object):
"""Display a value as a decimal."""
def __init__(self, value):
"""Initializes a `Decimal`.
Args:
value: (Sympy) value to display as a decimal.
Raises:
ValueError: If `value` cannot be represented as a terminating decimal.
"""
self._value = sympy.Rational(value)
numer = int(sympy.numer(self._value))
denom = int(sympy.denom(self._value))
denom_factors = list(sympy.factorint(denom).keys())
for factor in denom_factors:
if factor not in [2, 5]:
raise ValueError('Cannot represent {} as a non-recurring decimal.'
.format(value))
self._decimal = decimal.Decimal(numer) / decimal.Decimal(denom)
@property
def value(self):
"""Returns the value as a `sympy.Rational` object."""
return self._value
def _sympy_(self):
return self._value
def decimal_places(self):
"""Returns the number of decimal places, e.g., 32 has 0 and 1.43 has 2."""
if isinstance(self._decimal, int):
return 0
elif isinstance(self._decimal, decimal.Decimal):
return -self._decimal.as_tuple().exponent
def __str__(self):
sign, digits, exponent = self._decimal.as_tuple()
sign = '' if sign == 0 else '-'
num_left_digits = len(digits) + exponent # number digits "before" point
if num_left_digits > 0:
int_part = ''.join(str(digit) for digit in digits[:num_left_digits])
else:
int_part = '0'
if exponent < 0:
frac_part = '.'
if num_left_digits < 0:
frac_part += '0' * -num_left_digits
frac_part += ''.join(str(digit) for digit in digits[exponent:])
else:
frac_part = ''
return sign + int_part + frac_part
def __add__(self, other):
if not isinstance(other, Decimal):
raise ValueError('Arithmetic support limited to other `Decimal`s.')
return Decimal(self.value + other.value)
def __sub__(self, other):
if not isinstance(other, Decimal):
raise ValueError('Arithmetic support limited to other `Decimal`s.')
return Decimal(self.value - other.value)
def __mul__(self, other):
if not isinstance(other, Decimal):
raise ValueError('Arithmetic support limited to other `Decimal`s.')
return Decimal(self.value * other.value)
def __neg__(self):
return Decimal(-self.value)
def round(self, ndigits=0):
"""Returns a new `Decimal` rounded to this many decimal places."""
scale = sympy.Integer(10 ** ndigits)
numer = sympy.numer(self.value) * scale
denom = sympy.denom(self.value)
return Decimal(int(round(numer / denom)) / scale)
def __round__(self, ndigits=0):
return self.round(ndigits)
def __int__(self):
"""Returns conversion to integer if possible; TypeError if non-integer."""
if self.decimal_places() == 0:
return int(self._decimal)
else:
raise TypeError('Cannot represent {} as an integer.'.format(str(self)))
# NOTE: this is implemented in addition to `__cmp__` because SymPy does not
# support inequality comparison between sympy objects and objects that are not
# convertible to sympy objects (such as strings).
def __eq__(self, other):
return self.value == other
# Python 2 comparison
def __cmp__(self, other):
if self.value == other:
return 0
if self.value < other:
return -1
return 1
# Python 3 comparison:
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
class Percentage(object):
"""Container for a percentage."""
def __init__(self, value):
"""Initializes a `Percentage`.
Args:
value: Percentage as a fractional value. E.g., pass in
`sympy.Rational(2, 5)` to create the percentage "40%".
"""
self._value = value
def _sympy_(self):
return self._value
def __str__(self):
# Display percentages as decimals (not fractions).
value = Decimal(self._value * 100)
return str(value) + '%'
class NonSimpleRational(object):
"""Container for rational a / b where allow gcd(a, b) > 1."""
def __init__(self, numer, denom):
self._numer = numer
self._denom = denom
@property
def numer(self):
return self._numer
@property
def denom(self):
return self._denom
def __str__(self):
return '{}/{}'.format(self._numer, self._denom)
class StringNumber(object):
"""A string representing a number, that can also be sympified."""
def __init__(self, value, join_number_words_with_hyphens=True):
"""Initializes a `StringNumber`.
Args:
value: An integer or rational.
join_number_words_with_hyphens: Whether to join the words in integers with
hyphens when describing as a string.
"""
self._join_number_words_with_hyphens = join_number_words_with_hyphens
self._sympy_value = sympy.sympify(value)
self._string = self._to_string(value)
def _integer_to_words(self, integer):
"""Converts an integer to a list of words."""
if integer < 0:
raise ValueError('Cannot handle negative numbers.')
if integer < 20:
return [_INTEGER_LOW[integer]]
words = None
if integer < 100:
tens, ones = divmod(integer, 10)
if ones > 0:
return [_INTEGER_MID[tens], _INTEGER_LOW[ones]]
else:
return [_INTEGER_MID[tens]]
for value, word in _INTEGER_HIGH:
if integer >= value:
den, rem = divmod(integer, value)
words = self._integer_to_words(den) + [word]
if rem > 0:
if rem < 100:
words.append('and')
words += self._integer_to_words(rem)
return words
def _rational_to_string(self, rational):
"""Converts a rational to words, e.g., "two thirds"."""
numer = sympy.numer(rational)
denom = sympy.denom(rational)
numer_words = self._to_string(numer)
if denom == 1:
return numer_words
if denom <= 0 or denom >= len(_PLURAL_DENOMINATORS):
raise ValueError('Unsupported denominator {}.'.format(denom))
if numer == 1:
denom_word = _SINGULAR_DENOMINATORS[denom]
else:
denom_word = _PLURAL_DENOMINATORS[denom]
return '{} {}'.format(numer_words, denom_word)
def _to_string(self, number):
"""Converts an integer or rational to words."""
if isinstance(number, sympy.Integer) or isinstance(number, int):
words = self._integer_to_words(number)
join_char = '-' if self._join_number_words_with_hyphens else ' '
return join_char.join(words)
elif isinstance(number, sympy.Rational):
return self._rational_to_string(number)
else:
raise ValueError('Unable to handle number {} with type {}.'
.format(number, type(number)))
def _sympy_(self):
return self._sympy_value
def __str__(self):
return self._string
class StringOrdinal(object):
"""A string representation of an ordinal, e.g., "first"."""
def __init__(self, position):
"""Initializes a `StringOrdinal`.
Args:
position: An integer >= 0.
Raises:
ValueError: If `position` is negative or out of range.
"""
if position < 0 or position >= len(_ORDINALS):
raise ValueError('Unsupported ordinal {}.'.format(position))
self._string = _ORDINALS[position]
def __str__(self):
return self._string
class NumberList(object):
"""Contains a list of numbers, intended for display."""
def __init__(self, numbers):
self._numbers = numbers
def __str__(self):
"""Converts the list to a string.
Returns:
Human readable string.
Raises:
ValueError: if any of the strings contain a comma and thus would lead to
an ambiguous representation.
"""
strings = []
for number in self._numbers:
string = str(number)
if ',' in string:
raise ValueError('String representation of the list will be ambiguous, '
'since term "{}" contains a comma.'.format(string))
strings.append(string)
return ', '.join(strings)
class NumberInBase(object):
"""Contains value, represented in a given base."""
def __init__(self, value, base):
"""Initializes a `NumberInBase`.
Args:
value: Positive or negative integer.
base: Integer in the range [2, 36].
Raises:
ValueError: If base is not in the range [2, 36] (since this is the limit
of what can be represented by the 10 digits plus 26 letters).
"""
if not 2 <= base <= 36:
raise ValueError('base={} must be in the range [2, 36]'.format(base))
self._value = value
self._base = base
chars = []
remainder = abs(value)
while True:
digit = remainder % base
char = str(digit) if digit <= 9 else chr(ord('a') + digit - 10)
chars.append(char)
remainder = int(remainder / base)
if remainder == 0:
break
if value < 0:
chars.append('-')
self._str = ''.join(reversed(chars))
def __str__(self):
return self._str
def _sympy_(self):
return self._value
| mathematics_dataset-master | mathematics_dataset/util/display.py |
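A few usage examples of the display helpers above; the values are arbitrary and chosen only to show the formatting each class produces.

# A minimal sketch (arbitrary values) exercising the display helpers above.
import sympy
from mathematics_dataset.util import display

print(display.Decimal(sympy.Rational(3, 8)))       # -> 0.375
print(display.Percentage(sympy.Rational(2, 5)))    # -> 40%
print(display.NonSimpleRational(2, 6))             # -> 2/6 (left unsimplified)
print(display.StringNumber(sympy.Rational(2, 3)))  # -> two thirds
print(display.StringOrdinal(3))                    # -> third
print(display.NumberInBase(255, 16))               # -> ff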
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combinatorics utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
# Dependency imports
from six.moves import range
from six.moves import zip
def uniform_positive_integers_with_sum(count, sum_):
"""Returns list of size `count` of integers >= 1, summing to `sum_`."""
assert sum_ >= 0
if count > sum_:
raise ValueError('Cannot find {} numbers >= 1 with sum {}'
.format(count, sum_))
if count == 0:
return []
# Select `count - 1` numbers from {1, ..., sum_ - 1}
separators = random.sample(list(range(1, sum_)), count - 1)
separators = sorted(separators)
return [right - left
for left, right in zip([0] + separators, separators + [sum_])]
def uniform_non_negative_integers_with_sum(count, sum_):
"""Returns list of size `count` of integers >= 0, summing to `sum_`."""
positive = uniform_positive_integers_with_sum(count, sum_ + count)
return [i - 1 for i in positive]
def log_number_binary_trees(size):
"""Returns (nat) log of number of binary trees with `size` internal nodes."""
# This is equal to log of C_size, where C_n is the nth Catalan number.
assert isinstance(size, int)
assert size >= 0
log = 0.0
for k in range(2, size + 1):
log += math.log(size + k) - math.log(k)
return log
| mathematics_dataset-master | mathematics_dataset/util/combinatorics.py |
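A quick sketch, with illustrative arguments, of the combinatorics helpers above.

# Illustrative use of the combinatorics helpers; arguments are arbitrary.
import math
from mathematics_dataset.util import combinatorics

parts = combinatorics.uniform_positive_integers_with_sum(3, 10)
assert len(parts) == 3 and sum(parts) == 10 and min(parts) >= 1

parts = combinatorics.uniform_non_negative_integers_with_sum(4, 6)
assert len(parts) == 4 and sum(parts) == 6 and min(parts) >= 0

# log_number_binary_trees(n) is the log of the nth Catalan number, so C_3 = 5:
assert round(math.exp(combinatorics.log_number_binary_trees(3))) == 5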
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.util.probability."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.util import probability
import sympy
class FiniteProductEventTest(absltest.TestCase):
def testAllSequences(self):
event = probability.FiniteProductEvent([probability.DiscreteEvent({1, 2}),
probability.DiscreteEvent({3})])
all_sequences = [i for i in event.all_sequences()]
self.assertEqual(all_sequences, [(1, 3), (2, 3)])
class CountLevelSetEventTest(absltest.TestCase):
def testAllSequences(self):
event = probability.CountLevelSetEvent({'a': 2, 'b': 4, 'c': 1})
all_sequences = event.all_sequences()
# Number of sequences should be 7! / (4! * 2! * 1!) = 105.
self.assertLen(all_sequences, 105)
# They should all be unique.
self.assertEqual(len(all_sequences), len(set(all_sequences)))
# And check contains one correctly generated tuple.
self.assertIn(('a', 'b', 'c', 'b', 'b', 'a', 'b'), all_sequences)
class DiscreteProbabilitySpaceTest(absltest.TestCase):
def testBasic(self):
space = probability.DiscreteProbabilitySpace({0: 1, 1: 2, 2: 3})
p = space.probability(probability.DiscreteEvent([0]))
self.assertEqual(p, sympy.Rational(1, 6))
p = space.probability(probability.DiscreteEvent([0, 1]))
self.assertEqual(p, sympy.Rational(1, 2))
p = space.probability(probability.DiscreteEvent([0, 1, 2]))
self.assertEqual(p, 1)
p = space.probability(probability.DiscreteEvent([0, 1, 2, 3]))
self.assertEqual(p, 1)
p = space.probability(probability.DiscreteEvent([3]))
self.assertEqual(p, 0)
class FiniteProductSpaceTest(absltest.TestCase):
def testProbability_FiniteProductEvent(self):
# 5 coin flips of a biased coin with heads prob = 1/3.
base_space = probability.DiscreteProbabilitySpace({'h': 1, 't': 2})
space = probability.FiniteProductSpace([base_space] * 5)
heads = probability.DiscreteEvent({'h'})
tails = probability.DiscreteEvent({'t'})
event = probability.FiniteProductEvent([heads, heads, tails, tails, heads])
self.assertEqual(space.probability(event), sympy.Rational(4, 3**5))
def testProbability_CountLevelSetEvent(self):
base_space = probability.DiscreteProbabilitySpace({'a': 2, 'b': 3, 'c': 5})
space = probability.FiniteProductSpace([base_space] * 12)
event = probability.CountLevelSetEvent({'a': 7, 'b': 2, 'c': 3})
# Probability should be (12 choose 7 2 3) * p(a)^7 p(b)^2 p(c)^3
coeff = 7920
p_a = sympy.Rational(1, 5)
p_b = sympy.Rational(3, 10)
p_c = sympy.Rational(1, 2)
self.assertEqual(space.probability(event),
coeff * pow(p_a, 7) * pow(p_b, 2) * pow(p_c, 3))
class SampleWithoutReplacementSpaceTest(absltest.TestCase):
def testBasic(self):
space = probability.SampleWithoutReplacementSpace({0: 1, 1: 1}, 2)
event_0_0 = probability.FiniteProductEvent(
[probability.DiscreteEvent({0}), probability.DiscreteEvent({0})])
event_0_1 = probability.FiniteProductEvent(
[probability.DiscreteEvent({0}), probability.DiscreteEvent({1})])
p_0_0 = space.probability(event_0_0)
p_0_1 = space.probability(event_0_1)
self.assertEqual(p_0_0, 0)
self.assertEqual(p_0_1, sympy.Rational(1, 2))
space = probability.SampleWithoutReplacementSpace({0: 1, 1: 0}, 1)
event_0 = probability.FiniteProductEvent([probability.DiscreteEvent({0})])
event_1 = probability.FiniteProductEvent([probability.DiscreteEvent({1})])
event_2 = probability.FiniteProductEvent([probability.DiscreteEvent({2})])
p_0 = space.probability(event_0)
p_1 = space.probability(event_1)
p_2 = space.probability(event_2)
self.assertEqual(p_0, 1)
self.assertEqual(p_1, 0)
self.assertEqual(p_2, 0)
class DiscreteRandomVariableTest(absltest.TestCase):
def testCall(self):
random_variable = probability.DiscreteRandomVariable({1: 1, 2: 3, 3: 4})
forwards = random_variable(probability.DiscreteEvent({1, 3}))
self.assertEqual(forwards.values, {1, 4})
def testInverse(self):
random_variable = probability.DiscreteRandomVariable({1: 1, 2: 3, 3: 4})
inverse = random_variable.inverse(probability.DiscreteEvent({1, 3}))
self.assertEqual(inverse.values, {1, 2})
random_variable = probability.DiscreteRandomVariable({1: 1, 2: 1})
inverse = random_variable.inverse(probability.DiscreteEvent({1, 5}))
self.assertEqual(inverse.values, {1, 2})
class FiniteProductRandomVariableTest(absltest.TestCase):
def _random_variable(self):
rv1 = probability.DiscreteRandomVariable({1: 'a', 2: 'b', 3: 'c'})
rv2 = probability.DiscreteRandomVariable({1: 'x', 2: 'y', 3: 'x'})
return probability.FiniteProductRandomVariable((rv1, rv2))
def testCall_FiniteProductEvent(self):
rv = self._random_variable()
event1 = probability.DiscreteEvent({1, 2})
event2 = probability.DiscreteEvent({1, 3})
event = probability.FiniteProductEvent((event1, event2))
result = rv(event)
self.assertIsInstance(result, probability.FiniteProductEvent)
self.assertLen(result.events, 2)
self.assertEqual(result.events[0].values, {'a', 'b'})
self.assertEqual(result.events[1].values, {'x'})
def testInverse_FiniteProductEvent(self):
rv = self._random_variable()
event1 = probability.DiscreteEvent({'a', 'b'})
event2 = probability.DiscreteEvent({'x'})
event = probability.FiniteProductEvent((event1, event2))
result = rv.inverse(event)
self.assertIsInstance(result, probability.FiniteProductEvent)
self.assertLen(result.events, 2)
self.assertEqual(result.events[0].values, {1, 2})
self.assertEqual(result.events[1].values, {1, 3})
def testInverse_CountLevelSetEvent(self):
rv = self._random_variable()
event = probability.CountLevelSetEvent({'a': 1, 'x': 1})
result = rv.inverse(event)
sequences = result.all_sequences()
self.assertLen(sequences, 2)
self.assertEqual(set(sequences), {(1, 1), (1, 3)})
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/util/probability_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for working with probability spaces and random variables.
Basic recap of probability theory, and thus of classes in this file:
* A probability space is a (finite or infinite) set Omega with a probability
measure defined on this.
* A random variable is a mapping from a probability space to another measure
space.
* An event is a measurable set in a sample space.
For example, suppose a bag contains 3 balls: two red balls, and one white ball.
This could be represented by a discrete probability space of size 3 with
elements {1, 2, 3}, with equal measure assigned to all 3 elements; and a random
variable that maps 1->red, 2->red, and 3->white. Then the probability of drawing
a red ball is the measure in the probability space of the inverse under the
random variable mapping of {red}, i.e., of {1, 2}, which is 2/3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
# Dependency imports
import six
from six.moves import zip
import sympy
@six.add_metaclass(abc.ABCMeta)
class Event(object):
"""Represents an event in a measure space."""
@six.add_metaclass(abc.ABCMeta)
class ProbabilitySpace(object):
"""Represents a probability space."""
@abc.abstractmethod
def probability(self, event):
"""Returns the probability of an event."""
@six.add_metaclass(abc.ABCMeta)
class RandomVariable(object):
"""Random variable; a mapping from a probability space to a measure space."""
@abc.abstractmethod
def __call__(self, event):
"""Maps an `_Event` in the probability space to one in the sample space."""
@abc.abstractmethod
def inverse(self, event):
"""Maps event in the sample space back to the inverse in the prob. space."""
class DiscreteEvent(Event):
"""Set of discrete values."""
def __init__(self, values):
self._values = values
@property
def values(self):
return self._values
class FiniteProductEvent(Event):
"""Event consisting of cartesian product of events."""
def __init__(self, events):
"""Initializes a `FiniteProductEvent`.
Args:
events: Tuple of `Event`s; resulting event will be cartesian product of
these.
"""
self._events = events
@property
def events(self):
return self._events
def all_sequences(self):
"""Returns iterator of sequences by selecting a single event in each coord.
This assumes that every component event is an instance of `DiscreteEvent`.
Returns:
Iterator over tuples of values.
Raises:
ValueError: If one of the component events is not a `DiscreteEvent`.
"""
if not all(isinstance(event, DiscreteEvent) for event in self._events):
raise ValueError('Not all component events are DiscreteEvents')
values_list = [event.values for event in self._events]
return itertools.product(*values_list)
class CountLevelSetEvent(Event):
"""Event of all sequences with fixed number of different values occurring."""
def __init__(self, counts):
"""Initializes `CountLevelSetEvent`.
E.g., to construct the event of getting two red balls and one green ball,
pass `counts = {red: 2, green: 1}`. (Then `all_sequences()` would return
`[(red, red, green), (red, green, red), (green, red, red)]`.)
Args:
counts: Dictionary mapping values to the number of times they occur in a
sequence.
"""
self._counts = counts
self._all_sequences = None
@property
def counts(self):
return self._counts
def all_sequences(self):
"""Returns all sequences generated by this level set."""
if self._all_sequences is None:
# Generate via dynamic programming.
cache = {} # dict mapping tuple -> list of tuples
labels = list(self._counts.keys())
def generate(counts):
"""Returns list of tuples for given `counts` of labels."""
if sum(counts) == 0:
return [()]
counts = tuple(counts)
if counts in cache:
return cache[counts]
generated = []
for i, count in enumerate(counts):
if count == 0:
continue
counts_minus = list(counts)
counts_minus[i] -= 1
counts_minus = tuple(counts_minus)
extensions = generate(counts_minus)
generated += [tuple([labels[i]] + list(extension))
for extension in extensions]
cache[counts] = generated
return generated
self._all_sequences = generate(list(self._counts.values()))
return self._all_sequences
class SequenceEvent(Event):
"""Collection of sequences."""
def __init__(self, sequences):
self._sequences = sequences
def all_sequences(self):
return self._sequences
def normalize_weights(weights):
"""Normalizes the weights (as sympy.Rational) in dictionary of weights."""
weight_sum = sum(six.itervalues(weights))
return {
i: sympy.Rational(weight, weight_sum)
for i, weight in six.iteritems(weights)
}
class DiscreteProbabilitySpace(ProbabilitySpace):
"""Discrete probability space."""
def __init__(self, weights=None):
"""Initializes an `DiscreteProbabilitySpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
"""
self._weights = normalize_weights(weights)
def probability(self, event):
if isinstance(event, DiscreteEvent):
return sum(self._weights[value]
for value in event.values if value in self._weights)
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
@property
def weights(self):
"""Returns dictionary of probability of each element."""
return self._weights
class FiniteProductSpace(ProbabilitySpace):
"""Finite cartesian product of probability spaces."""
def __init__(self, spaces):
"""Initializes a `FiniteProductSpace`.
Args:
spaces: List of `ProbabilitySpace`.
"""
self._spaces = spaces
def all_spaces_equal(self):
return all([self._spaces[0] == space for space in self._spaces])
def probability(self, event):
# Specializations for optimization.
if isinstance(event, FiniteProductEvent):
assert len(self._spaces) == len(event.events)
return sympy.prod([
space.probability(event_slice)
for space, event_slice in zip(self._spaces, event.events)])
if isinstance(event, CountLevelSetEvent) and self.all_spaces_equal():
space = self._spaces[0]
counts = event.counts
probabilities = {
value: space.probability(DiscreteEvent({value}))
for value in six.iterkeys(counts)
}
num_events = sum(six.itervalues(counts))
assert num_events == len(self._spaces)
# Multinomial coefficient:
coeff = (
sympy.factorial(num_events) / sympy.prod(
[sympy.factorial(i) for i in six.itervalues(counts)]))
return coeff * sympy.prod([
pow(probabilities[value], counts[value])
for value in six.iterkeys(counts)
])
raise ValueError('Unhandled event type {}'.format(type(event)))
@property
def spaces(self):
"""Returns list of spaces."""
return self._spaces
class SampleWithoutReplacementSpace(ProbabilitySpace):
"""Probability space formed by sampling discrete space without replacement."""
def __init__(self, weights, n_samples):
"""Initializes a `SampleWithoutReplacementSpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
n_samples: Number of samples to draw.
Raises:
ValueError: If `n_samples > len(weights)`.
"""
if n_samples > len(weights):
raise ValueError('n_samples is more than number of discrete elements')
self._weights = normalize_weights(weights)
self._n_samples = n_samples
@property
def n_samples(self):
"""Number of samples to draw."""
return self._n_samples
def probability(self, event):
try:
all_sequences = event.all_sequences()
except AttributeError:
raise ValueError('Unhandled event type {}'.format(type(event)))
probability_sum = 0
for sequence in all_sequences:
if len(sequence) != len(set(sequence)):
continue # not all unique, so not "without replacement".
p_sequence = 1
removed_prob = 0
for i in sequence:
p = self._weights[i] if i in self._weights else 0
if p == 0:
p_sequence = 0
break
p_sequence *= p / (1 - removed_prob)
removed_prob += p
probability_sum += p_sequence
return probability_sum
class IdentityRandomVariable(RandomVariable):
"""Identity map of a probability space."""
def __call__(self, event):
return event
def inverse(self, event):
return event
class DiscreteRandomVariable(RandomVariable):
"""Specialization to discrete random variable.
This is simply a mapping from a discrete space to a discrete space (dictionary
lookup).
"""
def __init__(self, mapping):
"""Initializes `DiscreteRandomVariable` from `mapping` dict."""
self._mapping = mapping
self._inverse = {}
for key, value in six.iteritems(mapping):
if value in self._inverse:
self._inverse[value].add(key)
else:
self._inverse[value] = set([key])
def __call__(self, event):
if isinstance(event, DiscreteEvent):
return DiscreteEvent({self._mapping[value] for value in event.values})
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
def inverse(self, event):
if isinstance(event, DiscreteEvent):
set_ = set()
for value in event.values:
if value in self._inverse:
set_.update(self._inverse[value])
return DiscreteEvent(set_)
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
class FiniteProductRandomVariable(RandomVariable):
"""Product random variable.
This has the following semantics. Let this be X = (X_1, ..., X_n). Then
X(w) = (X_1(w_1), ..., X_n(w_n))
(the sample space is assumed to be of sequence type).
"""
def __init__(self, random_variables):
"""Initializes a `FiniteProductRandomVariable`.
Args:
random_variables: Tuple of `RandomVariable`.
"""
self._random_variables = random_variables
def __call__(self, event):
if isinstance(event, FiniteProductEvent):
assert len(event.events) == len(self._random_variables)
zipped = list(zip(self._random_variables, event.events))
return FiniteProductEvent(
[random_variable(sub_event)
for random_variable, sub_event in zipped])
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
def inverse(self, event):
# Specialization for `FiniteProductEvent`; don't need to take all sequences.
if isinstance(event, FiniteProductEvent):
assert len(event.events) == len(self._random_variables)
zipped = list(zip(self._random_variables, event.events))
return FiniteProductEvent(tuple(
random_variable.inverse(sub_event)
for random_variable, sub_event in zipped))
# Try fallback of mapping each sequence separately.
try:
all_sequences = event.all_sequences()
except AttributeError:
raise ValueError('Unhandled event type {}'.format(type(event)))
mapped = set()
for sequence in all_sequences:
assert len(sequence) == len(self._random_variables)
zipped = list(zip(self._random_variables, sequence))
mapped_sequence = FiniteProductEvent(tuple(
random_variable.inverse(DiscreteEvent({element}))
for random_variable, element in zipped))
mapped.update(mapped_sequence.all_sequences())
return SequenceEvent(mapped)
| mathematics_dataset-master | mathematics_dataset/util/probability.py |
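The sketch below reproduces the bag-of-balls example from the module docstring above (two red balls and one white ball, drawn once); the weights and labels are the illustrative ones from that docstring.

# A minimal sketch reproducing the bag-of-balls example from the docstring.
import sympy
from mathematics_dataset.util import probability

space = probability.DiscreteProbabilitySpace({1: 1, 2: 1, 3: 1})
colour = probability.DiscreteRandomVariable({1: 'red', 2: 'red', 3: 'white'})
red = probability.DiscreteEvent({'red'})
p_red = space.probability(colour.inverse(red))
assert p_red == sympy.Rational(2, 3)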
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For performing module composition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import string
# Dependency imports
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.sample import polynomials
from mathematics_dataset.util import combinatorics
from mathematics_dataset.util import display
import numpy as np
import six
from six.moves import range
from six.moves import zip
import sympy
# Allowed symbols. Don't allow "e", as sympy can hang when printing it as a
# function symbol (and it's reserved for exponent).
_ALLOWED_SYMBOLS = set(string.ascii_lowercase).difference(set(['e']))
class Polynomial(collections.namedtuple('Polynomial', ('coefficients'))):
"""Value wrapper for a polynomial function.
Attributes:
coefficients: Numpy array of coefficients; see `polynomials.py`.
"""
def __new__(cls, coefficients):
coefficients = np.asarray(coefficients)
return super(Polynomial, cls).__new__(cls, coefficients)
def is_polynomial(value):
return isinstance(value, Polynomial)
def is_integer_polynomial(value):
if not is_polynomial(value):
return False
coefficients = np.reshape(value.coefficients, [-1])
return all(number.is_integer(coeff) for coeff in coefficients)
# List of pairs of `(filter, sampler)`, where `filter` is a function returning
# True if the sampler is valid for the given value, and `sampler` returns an
# `Entity`.
_FILTERS_AND_SAMPLERS = []
def module(filter_):
"""Returns a Decorator for a module function.
The returned decorator adds the function to the library of known modules.
Args:
filter_: Callable determining whether the module can handle a given value.
Returns:
Decorator that adds the module function to the library.
"""
def decorator(module_fn):
"""Decorates a module function."""
_FILTERS_AND_SAMPLERS.append((filter_, module_fn))
return module_fn
return decorator
class SampleArgs(
collections.namedtuple('SampleArgs', ('num_modules', 'entropy'))):
"""For sampling mathematical entities / questions."""
def peel(self, frac=1):
"""Peels one (or `frac`) of a module's entropy.
In addition to a portion of the entropy, this returns a new `SampleArgs`
(since this object is immutable), which you should use when creating child
modules.
Args:
frac: Float; proportion of module's entropy to take.
Returns:
Pair `(entropy, new_sample_args)`, where `new_sample_args` is a new
`SampleArgs` with the entropy removed.
"""
entropy = frac * self.entropy / self.num_modules
new_sample_args = SampleArgs(
num_modules=self.num_modules, entropy=self.entropy - entropy)
return entropy, new_sample_args
def split(self, count):
"""Splits the entropy and module counts up.
Args:
count: Integer >= 1; the split size.
Returns:
List of `SampleArgs` of length `count`, to be passed to create child
entities.
Raises:
ValueError: If it was not possible to use up all the entropy, for example,
all requested types were `WithValue`.
"""
num_child_modules = self.num_modules - 1
# Sample module counts at random
module_counts = combinatorics.uniform_non_negative_integers_with_sum(
count, num_child_modules)
if num_child_modules == 0:
if self.entropy > 0:
raise ValueError('Unused entropy')
entropies = np.zeros(count)
else:
entropies = self.entropy * np.random.dirichlet(
np.maximum(1e-9, module_counts))
sample_args = []
for i, num_modules in enumerate(module_counts):
child_sample_args = SampleArgs(
num_modules=num_modules, entropy=entropies[i])
sample_args.append(child_sample_args)
return sample_args
class PreSampleArgs(
collections.namedtuple(
'PreSampleArgs',
('min_modules', 'max_modules', 'min_entropy', 'max_entropy'))):
"""Sample args before module count and entropy have been sampled."""
def __call__(self):
"""Samples `SampleArgs`."""
return SampleArgs(
num_modules=random.randint(self.min_modules, self.max_modules),
entropy=random.uniform(self.min_entropy, self.max_entropy))
def peel(self, *args, **kwargs):
sample_args = self()
return sample_args.peel(*args, **kwargs)
def split(self, *args, **kwargs):
sample_args = self()
return sample_args.split(*args, **kwargs)
class FunctionHandle(object):
"""Special handle to allow function composition.
For example, suppose fn1 = f o g
fn2 = h
and we want to display fn1(fn2(x)) = f(g(h(x))). This class basically just
stores the list of sympy functions.
"""
def __init__(self, *function_entities):
"""Initialize a `FunctionHandle`.
Args:
*function_entities: List of function letters and `Entity`s representing
functions, to be composed.
"""
self._functions = []
for fn in function_entities:
if isinstance(fn, str):
functions = [sympy.Function(fn)]
else:
assert isinstance(fn, Entity)
assert isinstance(fn.handle, FunctionHandle)
functions = fn.handle.functions
self._functions += functions
def apply(self, *input_):
"""Returns f(g(...(input)...)) where f, g, ... are the functions."""
result = None
for function in reversed(self._functions):
if result is None:
result = function(*input_)
else:
result = function(result)
return result
@property
def functions(self):
return self._functions
def __str__(self):
raise ValueError('This should not be directly converted to a string')
def _polynomial_entity(value, context):
"""Create a generic `Entity` describing a polynomial."""
assert isinstance(value, Polynomial)
coefficients = np.asarray(value.coefficients)
num_variables = coefficients.ndim
variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]
function_symbol = context.pop()
handle = FunctionHandle(function_symbol)
handle_description = sympy.Function(function_symbol)(*variables)
polynomial = polynomials.coefficients_to_polynomial(coefficients, variables)
polynomial = polynomial.sympy()
return Entity(
context=context,
value=value,
expression=polynomial,
polynomial_variables=variables,
description='Let {function} = {polynomial}.',
handle=handle,
function=handle_description,
polynomial=polynomial)
class Context(object):
"""Keeps track of used symbols, and sampling of children.
Each context is associated with an entity. Entities are constructed in a
tree-like fashion.
"""
def __init__(self, relation_symbols=None):
"""Initializes a `Context`.
Args:
relation_symbols: Set of symbols used "externally": this means (with
reference to the tree in which this context sits) all symbols
occurring that aren't in this node or sub-nodes.
"""
if relation_symbols is None:
relation_symbols = set()
else:
assert isinstance(relation_symbols, set)
for symbol in relation_symbols:
assert isinstance(symbol, str)
self._relation_symbols = relation_symbols
self._self_symbols = set()
self._child_symbols = set()
self._module_count = 1
self._child_entities = []
@property
def relation_symbols(self):
return self._relation_symbols.copy()
@property
def self_symbols(self):
return self._self_symbols.copy()
@property
def child_symbols(self):
return self._child_symbols.copy()
@property
def child_entities(self):
return self._child_entities[:]
def pop(self):
"""Returns an unused symbol (and keeps track of it being used)."""
allowed = (_ALLOWED_SYMBOLS
.difference(self._relation_symbols)
.difference(self._self_symbols)
.difference(self._child_symbols))
if not allowed:
raise ValueError('Ran out of symbols')
symbol = random.choice(list(allowed))
self._self_symbols.add(symbol)
return symbol
def mark_used(self, symbol):
"""Marks a given symbol as used."""
assert isinstance(symbol, str)
if (symbol in self._relation_symbols
or symbol in self._self_symbols
or symbol in self._child_symbols):
raise ValueError('Symbol {} already used'.format(symbol))
self._self_symbols.add(symbol)
@property
def module_count(self):
"""Returns the number of modules sampled."""
return self._module_count
def _sampler(self, value, sample_args):
"""Returns a sampler appropriate for the given args.
Args:
value: Target value.
sample_args: Instance of `SampleArgs` controlling entropy etc.
Returns:
A sampler for producing entities.
Raises:
ValueError: If no valid samplers were found.
"""
valid = []
for filter_, sampler in _FILTERS_AND_SAMPLERS:
if filter_(value):
valid.append(sampler)
if not valid:
raise ValueError('No valid samplers found: value={} sample_args={}'
.format(value, sample_args))
return random.choice(valid)
def _value_entity(self, value, context):
if isinstance(value, (sympy.Integer, sympy.Rational, display.Decimal)):
return Entity(context=context, value=value, handle=value)
if isinstance(value, Polynomial):
return _polynomial_entity(value, context)
raise ValueError('Don\'t know how to handle value={} of type {}'
.format(value, type(value)))
def sample(self, sample_args, values):
"""Sample multiple entities.
Args:
      sample_args: Instance of `SampleArgs` or `PreSampleArgs`. The entropy and
          module count will be split up between the various entities sampled.
values: List of values to sample.
Returns:
      List of `Entity` of the same length as `values`.
Raises:
RuntimeError: If one of the modules generates a non-`Entity`.
"""
# Can only sample children once.
assert self._module_count == 1
assert not self._child_symbols
assert not self._child_entities
if isinstance(sample_args, PreSampleArgs):
sample_args = sample_args()
sample_args_split = sample_args.split(len(values))
def all_symbols():
return (self._relation_symbols
.union(self._self_symbols)
.union(self._child_symbols))
for value, child_sample_args in zip(values, sample_args_split):
if number.is_integer(value):
value = sympy.Integer(value)
all_symbols_ = all_symbols()
context = Context(all_symbols_)
if child_sample_args.num_modules == 0:
entity = self._value_entity(value, context)
else:
sampler = self._sampler(value, child_sample_args)
entity = sampler(value, child_sample_args, context)
if not isinstance(entity, Entity):
raise RuntimeError(
'Expected entity, but got {} instead'.format(entity))
if (not number.is_integer_or_rational_or_decimal(entity.value)
and not isinstance(entity.value, Polynomial)):
raise RuntimeError('sampler {} returned invalid value of type {}'
.format(sampler, type(entity.value)))
if ((number.is_integer_or_rational_or_decimal(value)
and entity.value != value)
or (isinstance(value, Polynomial) and not np.array_equal(
entity.value.coefficients, value.coefficients))):
raise RuntimeError(
'entity values differ, sampler={} wanted={} got={}'
.format(sampler, value, entity.value))
if child_sample_args.num_modules != context.module_count:
raise RuntimeError(
'unused modules, value={} sample_args={} context.module_count={},'
' sampler={}'
.format(value, child_sample_args, context.module_count, sampler))
self._module_count += context.module_count
self._child_entities.append(entity)
for symbol in context.self_symbols.union(context.child_symbols):
assert symbol not in all_symbols_
self._child_symbols.add(symbol)
return self._child_entities
def sample_by_replacing_constants(self, sample_args, expressions):
"""Replaces some of the constants with handles from other modules."""
max_children = sample_args.num_modules - 1
if max_children <= 0:
return
if isinstance(expressions, ops.Op):
expressions = [expressions]
constants = ops.number_constants(expressions)
if not constants:
raise ValueError('No constants to replace in {}'
.format([str(expr) for expr in expressions]))
sample_count = random.randint(1, min(max_children, len(constants)))
constants = random.sample(constants, sample_count)
values = [constant.value for constant in constants]
entities = self.sample(sample_args, values)
for constant, entity in zip(constants, entities):
constant.value = entity.handle
def expand_entities(context, **kwargs):
"""Returns prefix description and updated `**kwargs`.
Args:
context: Instance of `Context` containing all `Entity`s that occur.
**kwargs: Dictionary of key/value pairs, some of which are `Entity`s.
Returns:
Pair `(child_description, new_kwargs)`. `child_description` is a description
of the entities contained in `kwargs`, and `new_kwargs` contains handles.
"""
kwargs = kwargs.copy()
entities = set(context.child_entities)
for key, maybe_entity in six.iteritems(kwargs):
if isinstance(maybe_entity, Entity):
entities.add(maybe_entity)
kwargs[key] = maybe_entity.handle
entities = list(entities)
random.shuffle(entities)
child_descriptions = []
for entity in entities:
child_descriptions.append(entity.child_description)
if not entity.expression_used:
child_descriptions.append(entity.description)
child_description = ' '.join([s for s in child_descriptions if s])
return child_description, kwargs
class Entity(object):
"""An entity (e.g., representing an integer or function).
Example usage:
```
new_entity = Entity(
context=context,
value=17,
      description='Let {self} be the gcd of {p} and {q}.',
      p=p, q=q)
```
Above, {self} will be replaced with a new symbol, to be used as the entity
handle.
"""
def __init__(self, context, value, description='', handle=None,
expression=None, polynomial_variables=None,
**description_kwargs):
"""Initializes an entity.
Args:
context: Instance of `Context` keeping track of used symbols and child
entities (that may not explicitly occur in `description_kwargs`).
value: The value represented by this Entity.
description: String, describing this Entity. This can contain '{self}' as
a substring, in which case it will be substituted with a new handle
(and `context` must be non-None).
handle: Optional string/symbol that refers to this Entity. This must
always be provided if '{self}' does not occur in `description`.
expression: Optional string (or something that can be converted to a
string, like ops.Op or sympy.expr) representing the value of this
Entity, that can be used directly instead of using `handle`.
polynomial_variables: If `expression` provided, then for polynomial
entities, the variables used.
**description_kwargs: Dict of substitutions (including entities) for use
in `description`.
Raises:
ValueError: If handle specified and '{self}' in description; or neither.
"""
self._value = value
child_description, description_kwargs = expand_entities(
context, **description_kwargs)
if '{self}' in description:
if handle is not None:
raise ValueError('Cannot specify handle if {self} in description')
handle = context.pop()
description_kwargs['self'] = handle
handle = sympy.var(handle)
else:
if handle is None:
raise ValueError('Must specify handle if {self} not in description')
if isinstance(handle, str):
handle = sympy.var(handle)
if (isinstance(value, Polynomial)
and expression is not None
and polynomial_variables is None):
      raise ValueError('Must provide polynomial_variables')
self._child_description = child_description
self._description = description.format(**description_kwargs)
self._handle = handle
self._expression = expression
self._polynomial_variables = polynomial_variables
# For checking that we don't use both the handle and the expression form of
# this entity.
self._handle_used = False
self._expression_used = False
@property
def value(self):
"""The actual value of the entity (e.g., sympy object)."""
return self._value
@property
def child_description(self):
"""A string describing the entities that this Entity relies on."""
return self._child_description
@property
def description(self):
"""A string describing the entity."""
assert not self._expression_used
self._handle_used = True
return self._description
def has_expression(self):
"""Returns whether there is an expression to use (instead of handle)."""
return self._expression is not None
@property
def handle(self):
"""A "handle", e.g., SymPy symbol or `function.CompositionHandle`."""
assert not self._expression_used
self._handle_used = True
return self._handle
@property
def expression(self):
"""An expression representing the entity; possibly None.
If this is used, then `description` does not need to get used.
Returns:
String or None.
"""
assert not self._handle_used
self._expression_used = True
return self._expression
@property
def polynomial_variables(self):
"""For when `expression` is not None, and this entity is a polynomial."""
return self._polynomial_variables
@property
def expression_used(self):
"""Returns true if expression instead of handle was used."""
return self._expression_used
@property
def expression_else_handle(self):
"""Returns an expression if present, else the handle."""
if self.has_expression():
return self.expression
else:
return self.handle
def __str__(self):
"""Raises value error - should not attempt to convert to string directly."""
raise ValueError('Should not convert Entity directly to string')
| mathematics_dataset-master | mathematics_dataset/util/composition.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.util.display."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.util import display
import sympy
class DecimalTest(absltest.TestCase):
def testBasic_integer(self):
decimal = display.Decimal(123)
self.assertEqual(str(decimal), '123')
self.assertEqual(sympy.sympify(decimal), sympy.Integer(123))
self.assertEqual(decimal.decimal_places(), 0)
def testBasic_ten(self):
decimal = display.Decimal(10)
self.assertEqual(str(decimal), '10')
self.assertEqual(sympy.sympify(decimal), sympy.Integer(10))
self.assertEqual(decimal.decimal_places(), 0)
def testBasic(self):
decimal = display.Decimal(sympy.Rational(123, 100))
self.assertEqual(str(decimal), '1.23')
self.assertEqual(sympy.sympify(decimal), sympy.Rational(123, 100))
self.assertEqual(decimal.decimal_places(), 2)
def testStr(self):
self.assertEqual(str(display.Decimal(sympy.Rational(0, 10))), '0')
self.assertEqual(str(display.Decimal(sympy.Rational(-1, 10))), '-0.1')
self.assertEqual(str(display.Decimal(sympy.Rational(-11, 10))), '-1.1')
self.assertEqual(str(display.Decimal(sympy.Rational(11, 10))), '1.1')
self.assertEqual(str(display.Decimal(sympy.Rational(101, 1))), '101')
self.assertEqual(
str(display.Decimal(sympy.Rational(20171, 1000000))), '0.020171')
def testStr_verySmall(self):
# Tests it doesn't display in "scientific" notation 1E-9.
decimal = display.Decimal(sympy.Rational(1, 1000000000))
self.assertEqual(str(decimal), '0.000000001')
def testAdd(self):
self.assertEqual((display.Decimal(2) + display.Decimal(3)).value, 5)
def testSub(self):
self.assertEqual((display.Decimal(2) - display.Decimal(3)).value, -1)
def testMul(self):
self.assertEqual((display.Decimal(2) * display.Decimal(3)).value, 6)
def testRound(self):
decimal = display.Decimal(sympy.Rational(2675, 1000)) # 2.675
self.assertEqual(sympy.sympify(decimal.round()), sympy.Integer(3))
self.assertEqual(sympy.sympify(decimal.round(1)), sympy.Rational(27, 10))
self.assertEqual(sympy.sympify(decimal.round(2)), sympy.Rational(268, 100))
self.assertEqual(sympy.sympify(decimal.round(3)),
sympy.Rational(2675, 1000))
def testInt(self):
decimal = display.Decimal(123)
self.assertEqual(int(decimal), 123)
def testInt_errorIfNonInt(self):
decimal = display.Decimal(sympy.Rational(1, 2))
    with self.assertRaisesRegex(TypeError, 'Cannot represent'):
int(decimal)
def testComparison(self):
decimal = display.Decimal(sympy.Rational(-1, 2))
# pylint: disable=g-generic-assert
self.assertFalse(decimal != -0.5)
self.assertTrue(decimal != 0)
self.assertFalse(decimal < -0.5)
self.assertTrue(decimal < 0)
self.assertTrue(decimal <= -0.5)
self.assertTrue(decimal <= 0)
self.assertFalse(decimal > -0.5)
self.assertTrue(decimal > -1)
self.assertTrue(decimal >= -0.5)
self.assertFalse(decimal >= 0)
self.assertFalse(decimal == 0)
self.assertTrue(decimal == -0.5)
def testNegation(self):
decimal = display.Decimal(sympy.Rational(1, 2))
decimal = -decimal
self.assertNotEqual(decimal, 0.5)
self.assertEqual(decimal, -0.5)
class PercentageTest(absltest.TestCase):
def testPercentage(self):
percentage = display.Percentage(1.5)
self.assertEqual(str(percentage), '150%')
percentage = display.Percentage(sympy.Rational(67, 100))
self.assertEqual(str(percentage), '67%')
percentage = display.Percentage(sympy.Rational(67, 1000))
self.assertEqual(str(percentage), '6.7%')
class NonSimpleRationalTest(absltest.TestCase):
def testBasic(self):
frac = display.NonSimpleRational(4, 6)
self.assertEqual(frac.numer, 4)
self.assertEqual(frac.denom, 6)
self.assertEqual(str(frac), '4/6')
class StringNumberTest(absltest.TestCase):
def testIntegerToWords(self):
words = display.StringNumber(0)
self.assertEqual(str(words), 'zero')
self.assertEqual(sympy.sympify(words), 0)
words = display.StringNumber(8)
self.assertEqual(str(words), 'eight')
self.assertEqual(sympy.sympify(words), 8)
words = display.StringNumber(12)
self.assertEqual(str(words), 'twelve')
self.assertEqual(sympy.sympify(words), 12)
words = display.StringNumber(30)
self.assertEqual(str(words), 'thirty')
self.assertEqual(sympy.sympify(words), 30)
words = display.StringNumber(100)
self.assertEqual(str(words), 'one-hundred')
self.assertEqual(sympy.sympify(words), 100)
words = display.StringNumber(103)
self.assertEqual(str(words), 'one-hundred-and-three')
self.assertEqual(sympy.sympify(words), 103)
words = display.StringNumber(15439822)
self.assertEqual(str(words), 'fifteen-million-four-hundred-and-thirty-nine'
'-thousand-eight-hundred-and-twenty-two')
self.assertEqual(sympy.sympify(words), 15439822)
def testRationalToWords(self):
words = display.StringNumber(sympy.Rational(2, 3))
self.assertEqual(str(words), 'two thirds')
class StringOrdinalTest(absltest.TestCase):
def testBasic(self):
ordinal = display.StringOrdinal(0)
self.assertEqual(str(ordinal), 'zeroth')
ordinal = display.StringOrdinal(10)
self.assertEqual(str(ordinal), 'tenth')
def testCreate_errorIfNegative(self):
    with self.assertRaisesRegex(ValueError, 'Unsupported ordinal'):
display.StringOrdinal(-1)
class NumberListTest(absltest.TestCase):
def testBasic(self):
numbers = [2, 3, 1]
number_list = display.NumberList(numbers)
string = str(number_list)
self.assertEqual(string, '2, 3, 1')
class NumberInBaseTest(absltest.TestCase):
def testBasic(self):
self.assertEqual(str(display.NumberInBase(1, 10)), '1')
self.assertEqual(str(display.NumberInBase(-1, 10)), '-1')
self.assertEqual(str(display.NumberInBase(1, 2)), '1')
self.assertEqual(str(display.NumberInBase(-1, 2)), '-1')
self.assertEqual(str(display.NumberInBase(2, 2)), '10')
self.assertEqual(str(display.NumberInBase(-2, 2)), '-10')
self.assertEqual(str(display.NumberInBase(10, 16)), 'a')
self.assertEqual(str(display.NumberInBase(16, 16)), '10')
self.assertEqual(str(display.NumberInBase(256, 16)), '100')
self.assertEqual(str(display.NumberInBase(-75483, 10)), '-75483')
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/util/display_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.sample.linear_system."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from mathematics_dataset.sample import linear_system
from six.moves import range
import sympy
class ExpressionWithValueTest(parameterized.TestCase):
def testIsTrivialIn(self):
self.assertEqual(linear_system._is_trivial_in([[1]], 0), False)
self.assertEqual(linear_system._is_trivial_in([[1, 2], [3, 4]], 0), False)
self.assertEqual(linear_system._is_trivial_in([[1, 2], [3, 0]], 0), True)
self.assertEqual(linear_system._is_trivial_in([[1, 2], [3, 0]], 1), False)
self.assertEqual(linear_system._is_trivial_in([[1, 2], [0, 3]], 0), False)
self.assertEqual(linear_system._is_trivial_in([[1, 2], [0, 3]], 1), True)
@parameterized.parameters([1, 2, 3])
def testLinearSystem(self, degree):
for _ in range(100): # test a few times
target = [random.randint(-100, 100) for _ in range(degree)]
variables = [sympy.Symbol(chr(ord('a') + i)) for i in range(degree)]
system = linear_system.linear_system(
variables=variables,
solutions=target,
entropy=10.0)
solved = sympy.solve(system, variables)
solved = [solved[symbol] for symbol in variables]
self.assertEqual(target, solved)
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/sample/linear_system_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample arithmetic expressions with a given value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import random
# Dependency imports
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.util import combinatorics
import numpy as np
import six
from six.moves import zip
import sympy
class _SampleArgs(collections.namedtuple('SampleArgs', ('count', 'entropy'))):
"""For sampling mathematical expressions."""
def peel(self, frac=1):
"""Peels one (or `frac`) of an op's entropy."""
entropy = frac * self.entropy / self.count
new_sample_args = _SampleArgs(self.count, self.entropy - entropy)
return entropy, new_sample_args
def split(self, args):
"""Splits the entropy and op counts up."""
non_integer_count = sum(not arg.is_Integer for arg in args)
assert non_integer_count <= self.count - 1
count_split = combinatorics.uniform_non_negative_integers_with_sum(
len(args), (self.count - 1) - non_integer_count)
for i, arg in enumerate(args):
if not arg.is_Integer:
count_split[i] += 1
if all(count == 0 for count in count_split):
assert self.entropy == 0
entropies = np.zeros(len(count_split))
else:
entropies = (
np.random.dirichlet(np.maximum(1e-9, count_split)) * self.entropy)
return [_SampleArgs(op_count, entropy)
for op_count, entropy in zip(count_split, entropies)]
def _add_sub_filter(value, sample_args):
return sample_args.count >= 2 or value.is_Integer
def _add_op(value, sample_args, rationals_allowed):
"""Returns sampled args for `ops.Add`."""
entropy, sample_args = sample_args.peel()
if rationals_allowed and sample_args.count >= 3:
x = number.integer_or_rational(entropy, True)
else:
x = number.integer(entropy, True)
if random.choice([False, True]):
op_args = [x, value - x]
else:
op_args = [value - x, x]
return ops.Add, op_args, sample_args
def _sub_op(value, sample_args, rationals_allowed):
"""Returns sampled args for `ops.Sub`."""
entropy, sample_args = sample_args.peel()
if rationals_allowed and sample_args.count >= 3:
x = number.integer_or_rational(entropy, True)
else:
x = number.integer(entropy, True)
if random.choice([False, True]):
op_args = [x, x - value]
else:
op_args = [value + x, x]
return ops.Sub, op_args, sample_args
def _entropy_of_factor_split(integer):
"""Returns entropy (log base 10) of decomposing: integer = a * b."""
assert integer.is_Integer
if integer == 0:
return 0
# Gives dict of form {factor: multiplicity}
factors = sympy.factorint(integer)
return sum(math.log10(mult + 1) for mult in six.itervalues(factors))
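# Worked example (illustrative, not from the original source): for 12 = 2**2 * 3
# the multiplicities are {2: 2, 3: 1}, so the entropy is
# log10(2 + 1) + log10(1 + 1) = log10(6), matching the 6 ways of writing
# 12 = a * b with positive integer factors.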
def _split_factors(integer):
"""Randomly factors integer into product of two integers."""
assert integer.is_Integer
if integer == 0:
return [1, 0]
# Gives dict of form {factor: multiplicity}
factors = sympy.factorint(integer)
left = sympy.Integer(1)
right = sympy.Integer(1)
for factor, mult in six.iteritems(factors):
left_mult = random.randint(0, mult)
right_mult = mult - left_mult
left *= factor ** left_mult
right *= factor ** right_mult
return left, right
def _mul_filter(value, sample_args):
if sample_args.count >= 2:
return True
if not value.is_Integer:
return False
return sample_args.entropy <= _entropy_of_factor_split(value)
def _mul_op(value, sample_args, rationals_allowed):
"""Returns sampled args for `ops.Mul`."""
if sample_args.count >= 3:
_, op_args, sample_args = _div_op(value, sample_args, rationals_allowed)
op_args = [op_args[0], sympy.Integer(1) / op_args[1]]
elif sample_args.count == 1:
entropy, sample_args = sample_args.peel()
assert _entropy_of_factor_split(value) >= entropy
op_args = _split_factors(value)
else:
assert sample_args.count == 2
entropy, sample_args = sample_args.peel()
numer = sympy.numer(value)
denom = sympy.denom(value)
p1, p2 = _split_factors(numer)
entropy -= _entropy_of_factor_split(numer)
mult = number.integer(entropy, signed=True, min_abs=1, coprime_to=p1)
op_args = [p1 / (mult * denom), p2 * mult]
if random.choice([False, True]):
op_args = list(reversed(op_args))
return ops.Mul, op_args, sample_args
def _div_filter(value, sample_args):
del value # unused
del sample_args # unused
return True
def _div_op(value, sample_args, rationals_allowed):
"""Returns sampled args for `ops.Div`."""
assert rationals_allowed # should be True if this function gets invoked
entropy, sample_args = sample_args.peel()
numer = sympy.numer(value)
denom = sympy.denom(value)
if sample_args.count == 1:
mult = number.integer(entropy, signed=True, min_abs=1)
op_args = [numer * mult, denom * mult]
elif sample_args.count == 2:
if numer == 0 or random.choice([False, True]):
x = number.integer(entropy, signed=True, min_abs=1, coprime_to=denom)
op_args = [sympy.Rational(x * numer, denom), x]
else:
x = number.integer(entropy, signed=True, min_abs=1, coprime_to=numer)
op_args = [x, sympy.Rational(x * denom, numer)]
else:
assert sample_args.count >= 3
p2, p1 = _split_factors(numer)
q1, q2 = _split_factors(denom)
entropy -= _entropy_of_factor_split(numer) + _entropy_of_factor_split(denom)
entropy_r = random.uniform(0, entropy)
entropy_s = entropy - entropy_r
r = number.integer(entropy_r, signed=True, min_abs=1, coprime_to=q1*p2)
s = number.integer(entropy_s, signed=False, min_abs=1, coprime_to=p1*q2)
op_args = [sympy.Rational(r*p1, s*q1), sympy.Rational(r*q2, s*p2)]
return ops.Div, op_args, sample_args
def _arithmetic(value, sample_args, add_sub, mul_div):
"""Internal arithmetic thingy...."""
assert sample_args.count >= 0
if sample_args.count == 0:
assert sample_args.entropy == 0
return ops.Constant(value)
allowed = []
if add_sub and _add_sub_filter(value, sample_args):
allowed.append(_add_op)
allowed.append(_sub_op)
if mul_div and _mul_filter(value, sample_args):
allowed.append(_mul_op)
if mul_div and _div_filter(value, sample_args):
allowed.append(_div_op)
if not allowed:
raise ValueError(
'No valid ops found, add_sub={} mul_div={} value={} sample_args={}'
.format(add_sub, mul_div, value, sample_args))
choice = random.choice(allowed)
op, args, sample_args = choice(value, sample_args, rationals_allowed=mul_div)
sample_args = sample_args.split(args)
child_expressions = [_arithmetic(arg, child_sample_arg, add_sub, mul_div)
for arg, child_sample_arg in zip(args, sample_args)]
return op(*child_expressions)
def length_range_for_entropy(entropy):
"""Returns length range to sample from for given entropy."""
min_length = 3
max_length = min_length + int(entropy / 2)
return min_length, max_length
def arithmetic(value, entropy, length=None, add_sub=True, mul_div=True):
"""Generates an arithmetic expression with a given value.
Args:
value: Target value (integer or rational).
entropy: Amount of randomness to use in generating expression.
length: Number of ops to use. If `None` then suitable length will be picked
based on entropy by sampling within the range
`length_range_for_entropy`.
add_sub: Whether to include addition and subtraction operations.
mul_div: Whether to include multiplication and division operations.
Returns:
Instance of `ops.Op` containing expression.
"""
assert isinstance(entropy, float)
if length is None:
min_length, max_length = length_range_for_entropy(entropy)
length = random.randint(min_length, max_length)
# Some entropy used up in sampling the length.
entropy -= math.log10(max_length - min_length + 1)
else:
assert isinstance(length, int)
# Entropy adjustment, because different binary trees (from sampling ops) can
# lead to the same expression. This is the correct value when we use just
  # addition as the op, and is otherwise an upper bound.
entropy += combinatorics.log_number_binary_trees(length) / math.log(10)
value = sympy.sympify(value)
sample_args = _SampleArgs(length, entropy)
return _arithmetic(value, sample_args, add_sub, mul_div)
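# Illustrative usage (assumption, not from the original source):
#   expression = arithmetic(value=sympy.Integer(6), entropy=4.0)
#   # `expression` is an `ops.Op` tree such as (-2)*(-3) + 5 - 5, and
#   # expression.sympy() evaluates back to 6.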
| mathematics_dataset-master | mathematics_dataset/sample/arithmetic.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate linear systems with given set of solutions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.sample import polynomials
import numpy as np
from six.moves import range
import sympy
def _make_equals_zero_split(monomials):
"""Returns an `ops.Eq` containing sum of monomials split on left and right."""
left = []
right = []
for monomial in monomials:
if random.choice([False, True]):
left.append(monomial)
else:
right.append(ops.Neg(monomial))
if not left:
left = [0]
if not right:
right = [0]
left = ops.Add(*left)
right = ops.Add(*right)
return ops.Eq(left, right)
def _is_trivial_in(matrix, variable):
"""Returns true if matrix_ij == 0 for some i and all j != variable."""
matrix = np.asarray(matrix)
assert matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]
size = matrix.shape[0]
if size == 1:
return False
for i in range(size):
all_zero = True
for j in range(size):
if j != variable and matrix[i, j] != 0:
all_zero = False
break
if all_zero:
return True
return False
def _invertible_matrix(degree, entropy, non_trivial_in):
"""Generates random invertible matrix."""
matrix_entropies = entropy * np.random.dirichlet(np.ones(degree * degree))
matrix_entropies = np.reshape(matrix_entropies, [degree, degree])
matrix_entropies = np.maximum(1, matrix_entropies)
while True:
def gen(i, j):
return number.integer(matrix_entropies[i, j], True)
matrix = [[gen(i, j) for i in range(degree)] for j in range(degree)] # pylint: disable=g-complex-comprehension
if non_trivial_in is not None and _is_trivial_in(matrix, non_trivial_in):
continue
if sympy.det(sympy.Matrix(matrix)) != 0:
break
matrix = np.asarray(matrix).astype(int)
return matrix
def linear_system(variables, solutions, entropy, non_trivial_in=None,
length=None):
"""Returns a linear system (set of equalities) with the given solutions.
Args:
variables: List of variables.
solutions: List of solutions, of the same length as `variables`.
entropy: Float >= 0; the entropy used.
non_trivial_in: Optional integer corresponding to a variable for which the
solution shouldn't be "trivial". E.g., "solve a + b = 3, a = -2 for a"
is disallowed if `variables[non_trivial_in] == 'a'`.
    length: Total number of terms appearing; if `None` then a suitable length
        is chosen based on the entropy.
Returns:
List of `ops.Eq`.
"""
degree = len(variables)
assert degree == len(solutions)
frac_entropy_matrix = random.uniform(1/3, 2/3)
matrix = _invertible_matrix(
degree, entropy * frac_entropy_matrix, non_trivial_in)
solutions = np.asarray(solutions)
constant = np.matmul(matrix, solutions.astype(int))
flattened = np.concatenate([np.reshape(matrix, [degree * degree]), constant])
is_zero = flattened == 0
if length is None:
min_length = np.count_nonzero(flattened) + 1
max_length = max(min_length, 1 + int(degree * (1 + entropy / 2)))
length = random.randint(min_length, max_length)
counts = polynomials.expanded_coefficient_counts(
length=length, is_zero=is_zero)
entropies = (1 - frac_entropy_matrix) * entropy * np.random.dirichlet(
np.maximum(1e-9, counts - 1))
terms = []
for i in range(len(flattened)):
coeffs = polynomials.integers_with_sum(
value=flattened[i], count=counts[i], entropy=entropies[i])
terms.append(coeffs)
matrix = terms[:degree*degree]
constant = terms[-degree:]
equations = []
for row_index in range(degree):
monomials = []
for col_index in range(degree):
for term in matrix[row_index * degree + col_index]:
monomials.append(polynomials.monomial(term, variables[col_index], 1))
for term in constant[row_index]:
monomials.append(polynomials.monomial(-term, None, 0))
equations.append(_make_equals_zero_split(monomials))
return equations
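# Illustrative usage (assumption, not from the original source):
#   x, y = sympy.symbols('x y')
#   equations = linear_system(variables=[x, y], solutions=[2, -1], entropy=6.0)
#   # `equations` is a list of two `ops.Eq`, e.g. 3*x + y = 5 and x - y = 3,
#   # whose unique solution is x = 2, y = -1 (see linear_system_test above).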
| mathematics_dataset-master | mathematics_dataset/sample/linear_system.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate random integers and rationals with minimum guarantees on entropy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
# Dependency imports
from mathematics_dataset.util import display
import numpy as np
import six
import sympy
def _coprime_density(value):
"""Returns float > 0; asymptotic density of integers coprime to `value`."""
factors = sympy.factorint(value)
density = 1.0
for prime in six.iterkeys(factors):
density *= 1 - 1 / prime
return density
def integer(entropy, signed, min_abs=0, coprime_to=1):
"""Returns an integer from a set of size ceil(10**entropy).
If `signed` is True, then includes negative integers, otherwise includes just
positive integers.
Args:
entropy: Float >= 0.
signed: Boolean. Whether to also return negative numbers.
min_abs: Integer >= 0. The minimum absolute value.
coprime_to: Optional integer >= 1. The returned integer is guaranteed to be
coprime to `coprime_to`, with entropy still accounted for.
Returns:
Integer.
"""
assert isinstance(min_abs, int) and not isinstance(min_abs, bool)
coprime_to = abs(coprime_to)
assert min_abs >= 0
max_ = math.pow(10, entropy)
max_ += min_abs
if coprime_to >= 2:
max_ = max_ / _coprime_density(coprime_to) + 1
if signed:
max_ = int(math.ceil(max_ / 2))
range_ = [-max_, max_]
else:
max_ = int(math.ceil(max_))
range_ = [min_abs, max_]
while True:
value = random.randint(*range_)
if abs(value) >= min_abs and sympy.gcd(value, coprime_to) == 1:
break
return sympy.Integer(value)
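# Illustrative usage (assumption, not from the original source):
#   integer(2.0, signed=True)                 # drawn from roughly [-50, 50]
#   integer(1.0, signed=False, min_abs=1)     # small positive integer
#   integer(2.0, signed=False, coprime_to=6)  # guaranteed coprime to 6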
def non_integer_rational(entropy, signed):
"""Similar args to `integer`. Entropy split between denom and numer."""
numer_entropy = random.uniform(0, entropy)
denom_entropy = entropy - numer_entropy
numer = integer(numer_entropy, signed, min_abs=1)
denom = integer(denom_entropy, False, min_abs=2, coprime_to=numer)
return sympy.Rational(numer, denom)
def integer_or_rational(entropy, signed, min_abs=0):
"""Returns a rational, with 50% probability of it being an integer."""
if random.choice([False, True]):
return integer(entropy, signed, min_abs=min_abs)
else:
return non_integer_rational(entropy, signed)
def non_integer_decimal(entropy, signed):
"""Returns a random decimal; integer divided by random power of ten.
Guaranteed to be non-integer (i.e., numbers after the decimal point).
Args:
entropy: Float.
signed: Boolean. Whether to also return negative numbers.
Returns:
Non-integer decimal.
"""
while True:
base = integer(entropy, signed)
shift = random.randint(1, int(math.ceil(entropy)))
divisor = 10**shift
if base % divisor != 0:
return display.Decimal(sympy.Rational(base, divisor))
def integer_or_decimal(entropy, signed):
"""Returns integer or non-integer decimal; 50% probability of each."""
if random.choice([False, True]):
# Represent it as a decimal so that arithmetic operations are supported:
return display.Decimal(integer(entropy, signed))
else:
return non_integer_decimal(entropy, signed)
def entropy_of_value(value):
"""Returns "min entropy" that would give probability of getting this value."""
if isinstance(value, display.Decimal):
return entropy_of_value(sympy.numer(value))
if is_non_integer_rational(value):
numer = sympy.numer(value)
denom = sympy.denom(value)
return entropy_of_value(numer) + entropy_of_value(denom)
elif not is_integer(value):
raise ValueError('Unhandled value: {}'.format(value))
# Note: we sample integers in a range of size approx 10**entropy about zero,
# so assume that `abs(value)` is about half of the upper range.
return math.log10(5 * abs(value) + 1)
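# Worked example (illustrative, not from the original source): `integer` with
# entropy e samples from a set of size about 10**e centred on zero, so a typical
# magnitude is about 10**e / 2; inverting gives
# entropy_of_value(50) == log10(5 * 50 + 1) ~= 2.4.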
def is_integer(value):
return isinstance(value, (int, np.int64, np.int32, sympy.Integer))
def is_positive_integer(value):
"""Filter for: value is a strictly positive integer."""
return is_integer(value) and value > 0
def is_integer_or_rational(value):
return is_integer(value) or isinstance(value, sympy.Rational)
def is_integer_or_decimal(value):
return is_integer(value) or isinstance(value, display.Decimal)
def is_integer_or_rational_or_decimal(value):
return is_integer_or_rational(value) or is_integer_or_decimal(value)
def is_non_integer_rational(value):
return is_integer_or_rational(value) and not is_integer(value)
| mathematics_dataset-master | mathematics_dataset/sample/number.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mathematics_dataset-master | mathematics_dataset/sample/__init__.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate polynomials with given values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
# Dependency imports
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.util import combinatorics
import numpy as np
import six
from six.moves import range
from six.moves import zip
import sympy
from sympy.solvers.diophantine import base_solution_linear as diophantine_solve_linear_2d
def expanded_coefficient_counts(length, is_zero):
"""Generates list of integers for number of terms of given power.
Args:
length: Integer >= `sum(is_zero)`.
is_zero: List of booleans.
Returns:
    List of non-negative integers of length `len(is_zero)`, summing to `length`,
such that if `is_zero[i]` then `return_value[i] != 1`.
Raises:
ValueError: If assignment not possible.
"""
if length == 1 and all(is_zero):
raise ValueError('length=1 and all zero')
counts = np.asarray([0 if zero else 1 for zero in is_zero])
extra_needed = (length - sum(counts))
if extra_needed < 0:
raise ValueError('length={} cannot handle is_zero={}'
.format(length, is_zero))
extra = combinatorics.uniform_non_negative_integers_with_sum(
count=len(is_zero), sum_=extra_needed)
counts += np.asarray(extra)
# Tweak so that no zeros get "1".
while True:
bad_zeros = [
i for i in range(len(is_zero)) if is_zero[i] and counts[i] == 1
]
if not bad_zeros:
break
take_from = random.choice(bad_zeros)
add_to = random.choice(
[i for i in range(len(is_zero)) if counts[i] >= 1 and i != take_from])
counts[take_from] -= 1
counts[add_to] += 1
return counts
def _split_value_equally(delta, count):
"""Splits an integer or rational into roughly equal parts."""
numer = sympy.numer(delta)
denom = sympy.denom(delta)
return [int(math.floor((numer + i) / count)) / denom for i in range(count)]
def integers_with_sum(value, count, entropy):
"""Returns list of integers with a given sum.
Args:
value: Target value.
count: Integer >= 1; the number of integers to use.
entropy: Entropy to use (in total).
Returns:
List of numbers summing to `value`.
Raises:
ValueError: If `value` is not an integer.
"""
# Special cases.
if count == 0:
assert value == 0
assert entropy == 0
return []
if count == 1:
assert entropy == 0
return [value]
if not number.is_integer(value):
raise ValueError('value={} (type={}) is not an integer'
.format(value, type(value)))
# Because e.g., (1, 1) and (2, 2) will both map to the same set of integers
# when we normalize to have sum equal to `value`.
entropy *= count / (count - 1)
min_term_entropy = max(
1, number.entropy_of_value(int(math.ceil(value/count))))
term_entropies = entropy * np.random.dirichlet(np.ones(count))
term_entropies = np.maximum(min_term_entropy, term_entropies)
terms = [number.integer(term_entropy, signed=True)
for term_entropy in term_entropies]
delta = value - sum(terms)
deltas = _split_value_equally(delta, count)
terms = [term + delta for term, delta in zip(terms, deltas)]
random.shuffle(terms)
return terms
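# Illustrative usage (assumption, not from the original source):
#   integers_with_sum(value=sympy.Integer(10), count=3, entropy=3.0)
#   # -> e.g. [14, -7, 3]: three shuffled integers summing to 10.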
def monomial(coefficient, variables, powers):
"""Makes a simple monomial term."""
if not isinstance(variables, (list, tuple)):
variables = [variables]
if not isinstance(powers, (list, tuple, np.ndarray)):
powers = [powers]
terms = []
for variable, power in zip(variables, powers):
if power == 0:
continue
elif power == 1:
terms.append(variable)
else:
terms.append(ops.Pow(variable, power))
if (not terms
or isinstance(coefficient, sympy.Symbol)
or abs(coefficient) != 1):
if isinstance(coefficient, sympy.Symbol):
terms.insert(0, coefficient)
else:
terms.insert(0, abs(coefficient))
if len(terms) > 1:
term = ops.Mul(*terms)
else:
term = terms[0]
if not isinstance(coefficient, sympy.Symbol) and coefficient < 0:
term = ops.Neg(term)
return term
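# Illustrative examples (assumptions, not from the original source):
#   monomial(3, sympy.Symbol('x'), 2)    # renders as 3*x**2
#   monomial(-1, sympy.Symbol('x'), 1)   # renders as -x (unit coefficient hidden)
#   monomial(5, sympy.Symbol('x'), 0)    # just the constant 5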
def sample_coefficients(degrees, entropy, min_non_zero=0, max_non_zero=None):
"""Generates grid of coefficients with shape `degrees + 1`.
This corresponds to univariate if degrees has length 1, otherwise
multivariate.
Args:
degrees: List of integers containing max degrees of variables.
entropy: Float >= 0; entropy for generating entries.
min_non_zero: Optional integer >= 1; the minimum number of non-zero coeffs.
max_non_zero: Optional integer >= 1; the maximum number of non-zero coeffs.
Returns:
NumPy int array of shape `degrees + 1`.
"""
if isinstance(degrees, int):
degrees = [degrees]
degrees = np.asarray(degrees)
def random_index():
return [random.randint(0, degrees[i]) for i in range(len(degrees))]
indices = set()
# Ensure a variable of degree `degrees[i]` occurs for every axis i.
for i, degree in enumerate(degrees):
if degree > 0:
index = random_index()
index[i] = degree
indices.add(tuple(index))
abs_max_non_zero = np.prod(degrees + 1)
min_non_zero = max(min_non_zero, 1, len(indices))
if max_non_zero is None:
max_non_zero = min_non_zero + int(entropy/2)
min_non_zero = min(min_non_zero, abs_max_non_zero)
max_non_zero = min(max_non_zero, abs_max_non_zero)
max_non_zero = max(min_non_zero, max_non_zero)
num_non_zero = random.randint(min_non_zero, max_non_zero)
while len(indices) < num_non_zero:
indices.add(tuple(random_index()))
coeffs = np.zeros(degrees + 1, dtype=np.int64)
entropies = entropy * np.random.dirichlet(np.ones(num_non_zero))
for index, entry_entropy in zip(indices, entropies):
value = number.integer(entry_entropy, signed=True, min_abs=1)
coeffs.itemset(index, value)
return coeffs
def expand_coefficients(coefficients, entropy, length=None):
"""Expands coefficients to multiple terms that sum to each coefficient.
Args:
coefficients: Array, such that `coefficients[i, j, ..., k]` is the
coefficient of x**i * y**j * ... * z**k.
entropy: Float >= 0; the entropy to use for generating extra randomness.
length: Number of terms that appear, e.g., 2x + 3 has two terms. If `None`
then a suitable length will be picked depending on the entropy
requested.
Returns:
Numpy object array with the same shape as `coefficients`, containing lists.
"""
coefficients = np.asarray(coefficients)
shape = coefficients.shape
expanded_coefficients = np.empty(shape, dtype=np.object)
min_length = np.count_nonzero(coefficients) + 2
if length is None:
max_length = min_length + int(math.ceil(entropy) / 2)
length = random.randint(min_length, max_length)
if length < min_length:
length = min_length
is_zero_flat = np.reshape(coefficients, [-1]) == 0
counts = expanded_coefficient_counts(length, is_zero=is_zero_flat)
coeffs_entropy = entropy * np.random.dirichlet(np.maximum(1e-9, counts - 1))
counts = np.reshape(counts, shape)
coeffs_entropy = np.reshape(coeffs_entropy, shape)
indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
for power in indices:
coeffs = integers_with_sum(
value=coefficients.item(power),
count=counts.item(power),
entropy=coeffs_entropy.item(power))
expanded_coefficients.itemset(power, coeffs)
return expanded_coefficients
def sample_expanded_coefficients(degrees, entropy, length=None):
"""Convenience function: samples and expands coeffs, entropy split equally."""
coefficients = sample_coefficients(degrees, entropy/2, max_non_zero=length)
return expand_coefficients(coefficients, entropy/2, length)
def coefficients_to_polynomial(coefficients, variables):
"""Converts array of lists of coefficients to a polynomial."""
coefficients = np.asarray(coefficients)
shape = coefficients.shape
indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
monomials = []
for power in indices:
coeffs = coefficients.item(power)
if (number.is_integer_or_rational(coeffs)
or isinstance(coeffs, sympy.Symbol)):
coeffs = [coeffs]
elif not isinstance(coeffs, list):
raise ValueError('Unrecognized coeffs={} type={}'
.format(coeffs, type(coeffs)))
for coeff in coeffs:
monomials.append(monomial(coeff, variables, power))
random.shuffle(monomials)
return ops.Add(*monomials)
def sample(variables, degrees, entropy, length=None):
coefficients = sample_expanded_coefficients(degrees, entropy, length)
return coefficients_to_polynomial(coefficients, variables)
def add_coefficients(coeffs1, coeffs2):
"""Adds together two sets of coefficients over same set of variables."""
coeffs1 = np.asarray(coeffs1)
coeffs2 = np.asarray(coeffs2)
degrees1 = np.array(coeffs1.shape)
degrees2 = np.array(coeffs2.shape)
assert len(degrees1) == len(degrees2)
extra1 = np.maximum(0, degrees2 - degrees1)
extra2 = np.maximum(0, degrees1 - degrees2)
pad1 = [(0, extra) for extra in extra1]
pad2 = [(0, extra) for extra in extra2]
coeffs1 = np.pad(coeffs1, pad1, 'constant', constant_values=0)
coeffs2 = np.pad(coeffs2, pad2, 'constant', constant_values=0)
return coeffs1 + coeffs2
def _random_factor(integer):
factors = sympy.factorint(integer)
result = 1
for factor, power in six.iteritems(factors):
result *= factor ** random.randint(0, power)
return result
def coefficients_linear_split(coefficients, entropy):
"""Finds two sets of coefficients and multipliers summing to `coefficients`.
Given `coefficients` (an integer vector), will sample integers `a, b`, and
two sets of coefficients `coefficients_1, coefficients_2`, such that
`a * coefficients_1 + b * coefficients_2 == coefficients`.
Args:
coefficients: Array of coefficients.
entropy: Float >= 0; the amount of randomness used to sample.
Returns:
    Tuple `(a, b, coefficients_1, coefficients_2)`.
"""
coefficients = np.asarray(coefficients)
coefficients_shape = coefficients.shape
coefficients = np.reshape(coefficients, [-1])
entropy_a = max(1, random.uniform(0, entropy/3))
entropy_b = max(1, random.uniform(0, entropy/3))
entropy -= entropy_a + entropy_b
entropy_coefficients = entropy * np.random.dirichlet(
np.ones(len(coefficients)))
# For each target coefficient z, we are required to solve the linear
# Diophantine equation a*x + b*y = c. Bezout's theorem: this has a solution if
# and only if gcd(a, b) divides c.
# Thus to be solvable for all coefficients, a and b must be chosen such that
# gcd(a, b) divides the gcd of the coefficients.
coefficients_gcd = sympy.gcd([i for i in coefficients])
coefficients_gcd = max(1, abs(coefficients_gcd))
a = number.integer(entropy_a, signed=True, min_abs=1)
b = number.integer(entropy_b, signed=True, min_abs=1, coprime_to=a)
b *= _random_factor(coefficients_gcd)
if random.choice([False, True]):
a, b = b, a
coefficients_1 = np.zeros(coefficients.shape, dtype=np.object)
coefficients_2 = np.zeros(coefficients.shape, dtype=np.object)
for index, coefficient in enumerate(coefficients):
entropy_coeff = entropy_coefficients[index]
t = number.integer(entropy_coeff, signed=True)
x, y = diophantine_solve_linear_2d(c=coefficient, a=a, b=b, t=t)
coefficients_1[index] = x
coefficients_2[index] = y
# Prevent all coefficients from being zero.
while np.all(coefficients_1 == 0) or np.all(coefficients_2 == 0):
index = random.randint(0, len(coefficients) - 1)
scale = random.choice([-1, 1])
coefficients_1[index] += scale * b
coefficients_2[index] -= scale * a
coefficients_1 = np.reshape(coefficients_1, coefficients_shape)
coefficients_2 = np.reshape(coefficients_2, coefficients_shape)
return a, b, coefficients_1, coefficients_2
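# Worked sketch (illustrative, not from the original source): for
# coefficients = [4, 6] one valid output is a=2, b=1, coefficients_1=[1, 3],
# coefficients_2=[2, 0], since 2*[1, 3] + 1*[2, 0] == [4, 6]; gcd(a, b) = 1
# divides every target coefficient, so each Diophantine equation is solvable.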
def _degree_of_variable(polynomial, variable):
polynomial = sympy.sympify(polynomial).expand()
if polynomial.is_constant():
return 0
polynomial = sympy.poly(polynomial)
if variable not in polynomial.free_symbols:
return 0
return polynomial.degree(variable)
def _sample_with_brackets(depth, variables, degrees, entropy, length,
force_brackets=True):
"""Internal recursive function for: constructs a polynomial with brackets."""
# To generate arbitrary polynomial recursively, can do one of:
# * add two polynomials, with at least one having brackets.
# * multiply two polynomials.
# * call `sample` (i.e., polynomial without brackets).
if force_brackets:
length = max(2, length)
if not force_brackets and (random.choice([False, True]) or length < 2):
return sample(variables, degrees, entropy, length)
length_left = random.randint(1, length - 1)
length_right = length - length_left
entropy_left, entropy_right = entropy * np.random.dirichlet(
[length_left, length_right])
if random.choice([False, True]):
# Add two. Force brackets on at least one of the polynomials, and sample
# repeatedly until we don't get cancellation.
while True:
left = _sample_with_brackets(
depth + 1, variables, degrees, entropy_left, length_left, True)
right = _sample_with_brackets(
depth + 1, variables, degrees, entropy_right, length_right, False)
if random.choice([False, True]):
left, right = right, left
result = ops.Add(left, right)
all_ok = True
for variable, degree in zip(variables, degrees):
if _degree_of_variable(result, variable) != degree:
all_ok = False
break
if all_ok:
return result
else:
# Multiply two.
def sample_with_zero_check(degrees_, entropy_, length_):
while True:
result = _sample_with_brackets(
depth + 1, variables, degrees_, entropy_, length_, False)
if degrees_.sum() > 0 or not result.sympy().is_zero:
return result
degrees = np.asarray(degrees)
def sample_degree(max_degree):
"""Select in range [0, max_degree], biased away from ends."""
if max_degree <= 1 or random.choice([False, True]):
return random.randint(0, max_degree)
return random.randint(1, max_degree - 1)
degrees_left = np.array([sample_degree(degree) for degree in degrees])
degrees_right = degrees - degrees_left
left = sample_with_zero_check(degrees_left, entropy_left, length_left)
right = sample_with_zero_check(degrees_right, entropy_right, length_right)
return ops.Mul(left, right)
def sample_with_brackets(variables, degrees, entropy, length=None):
"""Constructs a polynomial with brackets.
Args:
variables: List of variables to use.
degrees: Max degrees of variables. This function guarantees that these will
be obtained in the returned polynomial.
entropy: Float >= 0; the randomness to use in generating the polynomial.
length: Optional integer containing number of terms. If `None` then an
appropriate one will be generated depending on the entropy.
Returns:
Instance of `ops.Op` containing the polynomial.
"""
if isinstance(degrees, int):
degrees = [degrees]
if not isinstance(variables, (list, tuple)):
variables = [variables]
if length is None:
length = 3 + random.randint(0, int(entropy/2))
# Add on some entropy to compensate for different expressions generating the
# same apparent polynomial.
entropy += combinatorics.log_number_binary_trees(length) / math.log(10)
return _sample_with_brackets(0, variables, degrees, entropy, length, True)
def sample_with_small_evaluation(variable, degree, max_abs_input, entropy):
"""Generates a (canonically ordered) polynomial, with bounded evaluation.
The coefficients are chosen to make use of the entropy, with the scaling
adjusted so that all give roughly the same contribution to the output of the
polynomial when the input is bounded in magnitude by `max_abs_input`.
Args:
variable: Variable to use in polynomial.
degree: Degree of polynomial.
max_abs_input: Number >= 1; max absolute value of input.
entropy: Float; randomness for generating polynomial.
Returns:
Instance of `ops.Add`.
"""
assert max_abs_input >= 1
entropies = entropy * np.random.dirichlet(np.ones(degree + 1))
coeffs = []
for power in range(degree + 1):
# This scaling guarantees that the terms give roughly equal contribution
# to the typical magnitude of the polynomial when |input| <= max_abs_input.
delta = 0.5 * (degree - 2 * power) * math.log10(max_abs_input)
power_entropy = entropies[power] + delta
min_abs = 1 if power == degree else 0
coeff = number.integer(power_entropy, signed=True, min_abs=min_abs)
coeffs.append(coeff)
terms = [monomial(coeff, variable, power)
for power, coeff in enumerate(coeffs)]
return ops.Add(*terms)
def sample_messy_power(variable, entropy):
"""Returns unsimplified power expression like ((x**2)**3/x**4)**2/x**3."""
if entropy <= 0:
return variable
which = random.choice([1, 2, 3])
if which == 1:
exponent_entropy = min(2, entropy)
entropy -= exponent_entropy
exponent = number.integer_or_rational(exponent_entropy, signed=True)
left = sample_messy_power(variable, entropy)
return ops.Pow(left, exponent)
entropy_left = entropy / 2
if entropy_left < 1:
entropy_left = 0
entropy_right = entropy - entropy_left
if random.choice([False, True]):
entropy_left, entropy_right = entropy_right, entropy_left
left = sample_messy_power(variable, entropy_left)
right = sample_messy_power(variable, entropy_right)
if which == 2:
return ops.Mul(left, right)
else:
return ops.Div(left, right)
def trim(coefficients):
"""Makes non-zero entry in the final slice along each axis."""
coefficients = np.asarray(coefficients)
non_zero = np.not_equal(coefficients, 0)
ndim = coefficients.ndim
for axis in range(ndim):
length = coefficients.shape[axis]
axis_complement = list(range(0, axis)) + list(range(axis + 1, ndim))
non_zero_along_axis = np.any(non_zero, axis=tuple(axis_complement))
slice_to = 0
for index in range(length - 1, -1, -1):
if non_zero_along_axis[index]:
slice_to = index + 1
break
if slice_to < length:
coefficients = coefficients.take(axis=axis, indices=list(range(slice_to)))
return coefficients
def differentiate(coefficients, axis):
"""Differentiate coefficients (corresponding to polynomial) along axis."""
coefficients = np.asarray(coefficients)
indices = list(range(1, coefficients.shape[axis]))
coefficients = coefficients.take(axis=axis, indices=indices)
broadcast_shape = np.ones(coefficients.ndim, dtype=np.int32)
broadcast_shape[axis] = len(indices)
broadcast = np.asarray(indices).reshape(broadcast_shape)
result = broadcast * coefficients
return trim(result)
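# Worked example (illustrative, not from the original source): the univariate
# polynomial 1 + 2*x + 3*x**2 has coefficients [1, 2, 3], and
# differentiate([1, 2, 3], axis=0) returns [2, 6], i.e. 2 + 6*x.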
def integrate(coefficients, axis):
"""Integrate coefficients (corresponding to polynomial) along axis."""
coefficients = np.asarray(coefficients)
length = coefficients.shape[axis]
broadcast_shape = np.ones(coefficients.ndim, dtype=np.int32)
broadcast_shape[axis] = length
powers = np.array([sympy.Integer(i) for i in range(1, length + 1)])
powers = powers.reshape(broadcast_shape)
result_unpadded = coefficients / powers
pad = [(1 if i == axis else 0, 0) for i in range(coefficients.ndim)]
return np.pad(result_unpadded, pad, 'constant', constant_values=0)
| mathematics_dataset-master | mathematics_dataset/sample/polynomials.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathematical operations used to build up expressions for printing.
We can't use sympy because sympy will automatically simplify many types of
expressions, even with `evaluate=False` passed in. For example:
* Mul(-2, -3, evaluate=False) gives -(-6), not (-2) x (-3).
* Add(2, 1, evaluate=False) gives 1 + 2, because the terms are sorted.
As such, it's easier just to work with our own op classes that display precisely
as we created them. This also allows us to use custom symbols for the
expressions, such as the multiplication symbol.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
# Dependency imports
from absl import logging
from mathematics_dataset.sample import number
from mathematics_dataset.util import display
import numpy as np
import six
from six.moves import zip
import sympy
MUL_SYMBOL = '*'
DIV_SYMBOL = '/'
POW_SYMBOL = '**'
GT_SYMBOL = '>'
LT_SYMBOL = '<'
GE_SYMBOL = '>='
LE_SYMBOL = '<='
EQ_SYMBOL = '='
NE_SYMBOL = '!='
# Operator precedence levels. Used to insert brackets if necessary.
_EQ_PRECEDENCE = 0
_CONSTANT_PRECEDENCE = 1
_POW_PRECEDENCE = 2
_SQRT_PRECEDENCE = 3
_MUL_PRECEDENCE = 4
_ADD_PRECEDENCE = 5
def bracketed(child, parent, bracket_if_same_precedence):
"""Returns string representation of `child`, possibly bracketed.
Args:
child: Instance of `Op` or a valid value for `ConstantOp`.
parent: Instance of `Op`. Used to determine whether `child` needs to be
bracketed first before appearing in the parent op's expression.
bracket_if_same_precedence: Whether to bracket if the child has the same
operator precedence as the parent.
Returns:
String representation of `child`.
"""
if not isinstance(child, Op):
child = Constant(child)
child_precedence = child.precedence
parent_precedence = parent.precedence
if (parent_precedence > child_precedence
or (parent_precedence == child_precedence
and not bracket_if_same_precedence)):
return str(child)
else:
return '({})'.format(child)
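# Examples of the resulting bracketing (also covered in ops_test.py):
#   str(Mul(Add(1, 2), 3))  -> '(1 + 2)*3'  (Add binds less tightly than Mul)
#   str(Mul(Mul(2, 3), 5))  -> '2*3*5'      (same precedence, no brackets)
#   str(Pow(Pow(2, 3), 4))  -> '(2**3)**4'  (Pow brackets equal-precedence children)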
def _flatten(iterable):
"""Returns list."""
if isinstance(iterable, (list, tuple)):
result = list(iterable)
else:
assert isinstance(iterable, dict)
keys = sorted(six.iterkeys(iterable))
result = [iterable[key] for key in keys]
# Check we don't have any hierarchy in the structure (otherwise would need
# to use something recursive like tf.contrib.framework.nest.flatten).
for item in result:
assert not isinstance(item, (list, tuple, dict))
return result
def _pack_sequence_as(example, flat):
if isinstance(example, list) or isinstance(example, tuple):
return flat
else:
assert isinstance(example, dict)
keys = sorted(six.iterkeys(example))
return {key: value for key, value in zip(keys, flat)}
@six.add_metaclass(abc.ABCMeta)
class Op(object):
"""An operation.
This needs to support being transformed into sympy (and possibly in the future
other types such as an appropriately formatted string), when given the op
arguments.
"""
def __init__(self, children):
"""Initialize this `Op` base class.
Args:
children: Iterable structure containing child ops.
"""
assert isinstance(children, (list, dict, tuple))
flat_children = _flatten(children)
flat_children = [child if isinstance(child, Op) else Constant(child)
for child in flat_children]
children = _pack_sequence_as(children, flat_children)
self._children = children
@property
def children(self):
"""Returns iterable or dict over immediate children."""
return self._children
def descendants(self):
"""Returns list of all descendants (self, children, grandchildren, etc)."""
descendants = [self]
flat_children = _flatten(self._children)
for child in flat_children:
descendants += child.descendants()
return descendants
@abc.abstractmethod
def __str__(self):
"""Returns a string format of this op."""
@abc.abstractmethod
def sympy(self):
"""Returns the sympifcation of this op."""
def _sympy_(self):
"""Convenience method to automatically sympify this object."""
try:
return self.sympy()
except AttributeError as e:
# Note: we print this error here, before raising it again, because sympy
# will think `AttributeError` refers to this object not having a `_sympy_`
# method, rather than having it, which leads to otherwise confusing error
# messages.
logging.error(
'Encountered attribute error while trying to sympify: %s', e)
raise e
@abc.abstractproperty
def precedence(self):
"""Returns the precedence (integer) of this op."""
class Constant(Op):
"""Returns a constant value; a nullary op."""
def __init__(self, value):
super(Constant, self).__init__([])
if isinstance(value, six.integer_types):
value = sympy.Integer(value)
self._value = value
def __str__(self):
return str(self._value)
def sympy(self):
return self._value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def _is_simple(self):
"""Returns whether it's a simple number, rather than a division or neg."""
if isinstance(self._value, sympy.Symbol):
return True
elif (isinstance(self._value, int)
or isinstance(self._value, sympy.Integer)
or isinstance(self._value, display.Decimal)
or isinstance(self._value, np.int64)
or isinstance(self._value, np.int32)):
return self._value >= 0
elif isinstance(self._value, sympy.Rational):
return False
elif isinstance(self._value, sympy.Function):
return True
else:
raise ValueError('Unknown type {}'.format(type(self._value)))
@property
def precedence(self):
if self._is_simple():
return _CONSTANT_PRECEDENCE
else:
return _MUL_PRECEDENCE
class _SumLikeOp(Op):
"""Abstract op for sum-like terms which may contain negative entries."""
@abc.abstractmethod
def expanded_signs_and_terms(self):
"""Returns a list of arguments, plus any sub-arguments from sub-adds.
E.g., if this op is `Add(Add(2, Neg(3)), Mul(4, 5), 1)`, then will return
`[(True, 2), (False, 3), (True, Mul(4, 5)), (True, 1)]` (the arguments of
the inner add have been extracted).
"""
def __str__(self):
signs_and_terms = self.expanded_signs_and_terms()
if not signs_and_terms:
return '0'
for i, (sign, term) in enumerate(signs_and_terms):
if i == 0:
if sign:
expression = bracketed(term, self, True)
else:
expression = '-' + bracketed(term, self, True)
else:
if sign:
expression += ' + ' + bracketed(term, self, True)
else:
expression += ' - ' + bracketed(term, self, True)
return expression
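# For example (see ops_test.py): str(Add(3, Neg(2))) gives '3 - 2', and
# str(Add(Add(Neg(2), 5), 3)) gives '-2 + 5 + 3' -- the signs of nested
# sum-like children are folded into the displayed expression.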
class Identity(_SumLikeOp):
"""The identity op (a unitary op)."""
def __init__(self, input_):
super(Identity, self).__init__({'input': input_})
def expanded_signs_and_terms(self):
if isinstance(self.children['input'], _SumLikeOp):
return self.children['input'].expanded_signs_and_terms()
else:
return [(True, self.children['input'])]
def __str__(self):
return str(self.children['input'])
def sympy(self):
return self.children['input'].sympy()
@property
def precedence(self):
return self.children['input'].precedence
class Neg(_SumLikeOp):
"""Negation, a unary op. Also has special display when appearing in a sum."""
def __init__(self, arg):
super(Neg, self).__init__({'input': arg})
def expanded_signs_and_terms(self):
if isinstance(self.children['input'], _SumLikeOp):
inner_signs_and_terms = self.children['input'].expanded_signs_and_terms()
return [(not sign, term) for (sign, term) in inner_signs_and_terms]
else:
return [(False, self.children['input'])]
def sympy(self):
return -sympy.sympify(self.children['input'])
def inner(self):
return self.children['input']
@property
def precedence(self):
return _ADD_PRECEDENCE
class Add(_SumLikeOp):
"""Addition."""
def __init__(self, *args):
super(Add, self).__init__(args)
def expanded_signs_and_terms(self):
"""Returns a list of arguments, plus any sub-arguments from sub-adds.
    E.g., if this op is `Add(Add(2, 3), Mul(4, 5), 1)`, then will return
    `[(True, 2), (True, 3), (True, Mul(4, 5)), (True, 1)]` (the arguments of the
    inner add have been extracted).
"""
expanded = []
for arg in self.children:
if isinstance(arg, _SumLikeOp):
expanded += arg.expanded_signs_and_terms()
else:
expanded.append((True, arg))
return expanded
def sympy(self):
return sympy.Add(*[sympy.sympify(arg) for arg in self.children])
@property
def precedence(self):
return _ADD_PRECEDENCE
class Sub(Op):
"""Subtraction."""
def __init__(self, left, right):
super(Sub, self).__init__({'left': left, 'right': right})
def __str__(self):
return (bracketed(self.children['left'], self, False) + ' - '
+ bracketed(self.children['right'], self, True))
def sympy(self):
return sympy.Add(
self.children['left'], sympy.Mul(-1, self.children['right']))
@property
def precedence(self):
return _ADD_PRECEDENCE
class Mul(Op):
"""Multiplication."""
def __init__(self, *args):
super(Mul, self).__init__(args)
def __str__(self):
if not self.children:
return '1'
else:
args = [bracketed(arg, self, False) for arg in self.children]
return MUL_SYMBOL.join(args)
def sympy(self):
return sympy.Mul(*[sympy.sympify(arg) for arg in self.children])
@property
def precedence(self):
return _MUL_PRECEDENCE
class Div(Op):
"""Division."""
def __init__(self, numer, denom):
super(Div, self).__init__({'numer': numer, 'denom': denom})
def __str__(self):
return u'{}{}{}'.format(
bracketed(self.children['numer'], self, True), DIV_SYMBOL,
bracketed(self.children['denom'], self, True))
def sympy(self):
return sympy.Mul(
self.children['numer'], sympy.Pow(self.children['denom'], -1))
@property
def precedence(self):
return _MUL_PRECEDENCE
class Pow(Op):
"""Power a to the power b."""
def __init__(self, a, b):
super(Pow, self).__init__({'a': a, 'b': b})
def __str__(self):
return u'{}{}{}'.format(
bracketed(self.children['a'], self, True), POW_SYMBOL,
bracketed(self.children['b'], self, True))
def sympy(self):
return sympy.Pow(
sympy.sympify(self.children['a']), sympy.sympify(self.children['b']))
@property
def precedence(self):
return _POW_PRECEDENCE
class Sqrt(Op):
"""Square root of a value."""
def __init__(self, a):
super(Sqrt, self).__init__({'a': a})
def __str__(self):
return 'sqrt({})'.format(self.children['a'])
def sympy(self):
return sympy.sqrt(self.children['a'])
@property
def precedence(self):
return _POW_PRECEDENCE
class Eq(Op):
"""Equality."""
def __init__(self, left, right):
super(Eq, self).__init__({'left': left, 'right': right})
def __str__(self):
return '{} = {}'.format(self.children['left'], self.children['right'])
def sympy(self):
return sympy.Eq(self.children['left'], self.children['right'])
@property
def precedence(self):
return _EQ_PRECEDENCE
def number_constants(expressions):
"""Returns list of integer, rational, decimal constants in the expressions."""
if isinstance(expressions, Op):
expressions = [expressions]
descendants = []
for expression in expressions:
descendants += expression.descendants()
candidate_constants = [op for op in descendants if isinstance(op, Constant)]
return [constant for constant in candidate_constants
if number.is_integer_or_rational_or_decimal(constant.value)]
| mathematics_dataset-master | mathematics_dataset/sample/ops.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.sample.number."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from mathematics_dataset.sample import number
from six.moves import range
import sympy
class NumberTest(parameterized.TestCase):
def testCoprimeDensity(self):
self.assertEqual(number._coprime_density(1), 1.0)
self.assertEqual(number._coprime_density(2), 0.5)
self.assertLess(abs(number._coprime_density(3) - 2/3), 1e-6)
self.assertLess(abs(number._coprime_density(6) - 1/3), 1e-6)
@parameterized.parameters(False, True)
def testInteger_allowZero(self, signed):
saw_zero = False
saw_nonzero = False
for _ in range(1000):
sample = number.integer(1, signed=signed)
if sample == 0:
saw_zero = True
else:
saw_nonzero = True
if saw_zero and saw_nonzero:
break
self.assertTrue(saw_zero)
self.assertTrue(saw_nonzero)
def testNonIntegerRational(self):
for _ in range(1000):
entropy = random.uniform(0, 10)
signed = random.choice([False, True])
sample = number.non_integer_rational(entropy, signed)
self.assertNotEqual(sympy.denom(sample), 1)
@parameterized.parameters(False, True)
def testIntegerOrRational(self, signed):
# Tests we can call it. Do it a few times so both code paths get executed.
for _ in range(10):
number.integer_or_rational(2, signed)
def testNonIntegerDecimal(self):
for _ in range(1000):
sample = number.non_integer_decimal(1, False)
self.assertNotEqual(sympy.denom(sample), 1)
self.assertLen(str(sample), 3) # should be of form "0.n"
self.assertGreater(sample, 0) # positive
def testNonIntegerDecimal_size(self):
saw_bigger_one = False
saw_smaller_one = False
for _ in range(1000):
sample = number.non_integer_decimal(2, False)
if sample > 1:
saw_bigger_one = True
else:
saw_smaller_one = True
if saw_bigger_one and saw_smaller_one:
break
self.assertTrue(saw_bigger_one)
self.assertTrue(saw_smaller_one)
@parameterized.parameters(
lambda: number.integer(0, True),
lambda: number.integer(1, True),
lambda: number.non_integer_rational(2, True),
lambda: number.non_integer_decimal(1, True))
def testGenerate_signed(self, generator):
saw_positive = False
saw_negative = False
for _ in range(1000):
sample = generator()
saw_positive |= sample > 0
saw_negative |= sample < 0
if saw_positive and saw_negative:
break
self.assertTrue(saw_positive)
self.assertTrue(saw_negative)
@parameterized.parameters(
lambda: number.integer(2, False),
lambda: number.non_integer_rational(2, False))
def testIntegerRational_distinctCount(self, generator):
seen = set()
for _ in range(3000):
seen.add(generator())
self.assertGreaterEqual(len(seen), 10 ** 2)
@parameterized.parameters(number.integer, number.non_integer_decimal)
def testEntropyOfValue(self, generator):
for entropy in [1, 2, 4, 8, 16]:
sum_entropy = 0.0
count = 2000
for _ in range(count):
value = generator(entropy, signed=True)
sum_entropy += number.entropy_of_value(value)
avg_entropy = sum_entropy / count
error = abs(entropy - avg_entropy) / entropy
self.assertLess(error, 0.2)
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/sample/number_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.sample.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.sample import ops
from six.moves import range
import sympy
class OpsTest(absltest.TestCase):
def testNeg(self):
op = ops.Neg(2)
self.assertEqual(str(op), '-2')
self.assertEqual(op.sympy(), -2)
op = ops.Add(ops.Neg(2), 3)
self.assertEqual(str(op), '-2 + 3')
self.assertEqual(op.sympy(), 1)
op = ops.Add(3, ops.Neg(2))
self.assertEqual(str(op), '3 - 2')
self.assertEqual(op.sympy(), 1)
op = ops.Add(ops.Add(ops.Neg(2), 5), 3)
self.assertEqual(str(op), '-2 + 5 + 3')
self.assertEqual(op.sympy(), 6)
op = ops.Add(3, ops.Add(ops.Identity(ops.Neg(2)), 5))
self.assertEqual(str(op), '3 - 2 + 5')
self.assertEqual(op.sympy(), 6)
op = ops.Add(3, ops.Add(2, ops.Neg(5)))
self.assertEqual(str(op), '3 + 2 - 5')
self.assertEqual(op.sympy(), 0)
def testAdd(self):
add = ops.Add()
self.assertEqual(str(add), '0')
self.assertEqual(add.sympy(), 0)
add = ops.Add(2, 3)
self.assertEqual(str(add), '2 + 3')
self.assertEqual(add.sympy(), 5)
add = ops.Add(ops.Add(1, 2), 3)
self.assertEqual(str(add), '1 + 2 + 3')
self.assertEqual(add.sympy(), 6)
def testSub(self):
sub = ops.Sub(2, 3)
self.assertEqual(str(sub), '2 - 3')
self.assertEqual(sub.sympy(), -1)
sub = ops.Sub(ops.Sub(1, 2), 3)
self.assertEqual(str(sub), '1 - 2 - 3')
self.assertEqual(sub.sympy(), -4)
sub = ops.Sub(1, ops.Sub(2, 3))
self.assertEqual(str(sub), '1 - (2 - 3)')
self.assertEqual(sub.sympy(), 2)
sub = ops.Sub(ops.Neg(1), 2)
self.assertEqual(str(sub), '-1 - 2')
self.assertEqual(sub.sympy(), -3)
def testMul(self):
mul = ops.Mul()
self.assertEqual(str(mul), '1')
self.assertEqual(mul.sympy(), 1)
mul = ops.Mul(2, 3)
self.assertEqual(str(mul), '2*3')
self.assertEqual(mul.sympy(), 6)
mul = ops.Mul(ops.Identity(ops.Constant(-2)), 3)
self.assertEqual(str(mul), '-2*3')
self.assertEqual(mul.sympy(), -6)
mul = ops.Mul(ops.Add(1, 2), 3)
self.assertEqual(str(mul), '(1 + 2)*3')
self.assertEqual(mul.sympy(), 9)
mul = ops.Mul(ops.Mul(2, 3), 5)
self.assertEqual(str(mul), '2*3*5')
self.assertEqual(mul.sympy(), 30)
# TODO(b/124038946): reconsider how we want brackets in these cases:
# mul = ops.Mul(ops.Div(2, 3), 5)
# self.assertEqual(str(mul), '(2/3)*5')
# self.assertEqual(mul.sympy(), sympy.Rational(10, 3))
#
# mul = ops.Mul(sympy.Rational(2, 3), 5)
# self.assertEqual(str(mul), '(2/3)*5')
# self.assertEqual(mul.sympy(), sympy.Rational(10, 3))
def testDiv(self):
div = ops.Div(2, 3)
self.assertEqual(str(div), '2/3')
self.assertEqual(div.sympy(), sympy.Rational(2, 3))
div = ops.Div(2, sympy.Rational(4, 5))
self.assertEqual(str(div), '2/(4/5)')
self.assertEqual(div.sympy(), sympy.Rational(5, 2))
div = ops.Div(1, ops.Div(2, 3))
self.assertEqual(str(div), '1/(2/3)')
self.assertEqual(div.sympy(), sympy.Rational(3, 2))
div = ops.Div(ops.Div(2, 3), 4)
self.assertEqual(str(div), '(2/3)/4')
self.assertEqual(div.sympy(), sympy.Rational(1, 6))
div = ops.Div(2, ops.Mul(3, 4))
self.assertEqual(str(div), '2/(3*4)')
div = ops.Div(2, sympy.Function('f')(sympy.Symbol('x')))
self.assertEqual(str(div), '2/f(x)')
def testPow(self):
pow_ = ops.Pow(2, 3)
self.assertEqual(str(pow_), '2**3')
self.assertEqual(pow_.sympy(), 8)
pow_ = ops.Pow(4, sympy.Rational(1, 2))
self.assertEqual(str(pow_), '4**(1/2)')
self.assertEqual(pow_.sympy(), 2)
pow_ = ops.Pow(sympy.Rational(1, 2), 3)
self.assertEqual(str(pow_), '(1/2)**3')
self.assertEqual(pow_.sympy(), 1/8)
pow_ = ops.Pow(3, ops.Pow(2, 1))
self.assertEqual(str(pow_), '3**(2**1)')
self.assertEqual(pow_.sympy(), 9)
pow_ = ops.Pow(ops.Pow(2, 3), 4)
self.assertEqual(str(pow_), '(2**3)**4')
self.assertEqual(pow_.sympy(), 4096)
pow_ = ops.Pow(-5, 2)
self.assertEqual(str(pow_), '(-5)**2')
self.assertEqual(pow_.sympy(), 25)
def testEq(self):
op = ops.Eq(ops.Add(2, 3), 4)
self.assertEqual(str(op), '2 + 3 = 4')
self.assertEqual(op.sympy(), False)
def testDescendants(self):
constants = [ops.Constant(i) for i in range(6)]
# (1 + 2*3**4) / 5 - 6
expression = ops.Sub(
ops.Div(
ops.Add(
constants[0],
ops.Mul(
constants[1],
ops.Pow(
constants[2],
constants[3]))),
constants[4]),
constants[5])
descendants = expression.descendants()
descendants = ops._flatten(descendants)
for constant in constants:
self.assertIn(constant, descendants)
self.assertEqual(descendants.count(constant), 1)
# Also test top-level.
self.assertEqual(constants[0].descendants(), [constants[0]])
# Also general structure.
constant = ops.Constant(3)
expression = ops.Neg(constant)
self.assertEqual(set(expression.descendants()), set([constant, expression]))
def testNumberConstants(self):
constant = ops.Constant(3)
expression = ops.Neg(constant)
constants = ops.number_constants([expression])
self.assertEqual(constants, [constant])
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/sample/ops_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.sample.polynomials."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from absl.testing import parameterized
from mathematics_dataset.sample import polynomials
import numpy as np
from six.moves import range
import sympy
import tensorflow as tf
class ExpressionWithValueTest(tf.test.TestCase, parameterized.TestCase):
def testSplitValueEqually(self):
split = polynomials._split_value_equally(3, 2)
self.assertEqual(split, [1, 2])
split = polynomials._split_value_equally(sympy.sympify('3/4'), 2)
self.assertEqual(split, [sympy.sympify('1/4'), sympy.sympify('1/2')])
def testIntegersWithSum(self):
value = 13
count = 10
terms = polynomials.integers_with_sum(value=value, count=count, entropy=4.0)
self.assertLen(terms, count)
self.assertEqual(sum(terms), value)
def testMonomial(self):
x, y = sympy.symbols('x y')
self.assertEqual(str(polynomials.monomial(1, [x, y], [2, 3])), 'x**2*y**3')
# TODO(b/124038530): how handle rational coefficients; are they even used?
# self.assertEqual(
# str(polynomials.monomial(sympy.Rational(2, 3), [x], [1])), '2*x/3')
# self.assertEqual(
# str(polynomials.monomial(sympy.Rational(1, 3), [x], [1])), 'x/3')
self.assertEqual(str(polynomials.monomial(x, [y], [4])), 'x*y**4')
def testExpandCoefficients(self):
for _ in range(10):
num_variables = np.random.randint(1, 4)
degrees = np.random.randint(0, 4, [num_variables])
coefficients = np.random.randint(-3, 3, degrees + 1)
entropy = np.random.uniform(0, 10)
expanded = polynomials.expand_coefficients(coefficients, entropy)
collapsed = np.vectorize(sum)(expanded)
self.assertAllEqual(coefficients, collapsed)
def testCoefficientsToPolynomial(self):
coeffs = [3, 2, 1]
x = sympy.Symbol('x')
polynomial = polynomials.coefficients_to_polynomial(coeffs, [x])
polynomial = sympy.sympify(polynomial)
self.assertEqual(polynomial, x*x + 2*x + 3)
def testUnivariate(self):
# Test generation for: x**2 + 2*x + 1
x = sympy.Symbol('x')
coeffs = [1, 2, 3]
for _ in range(10):
expanded = polynomials.expand_coefficients(coeffs, 5.0)
polynomial = polynomials.coefficients_to_polynomial(expanded, [x])
sympified = sympy.sympify(polynomial)
self.assertEqual(sympified, 1 + 2*x + 3*x*x)
def testMultivariate(self):
# Test generation for: x**2 + 2*x*y + 3*y**2 - x + 5
x, y = sympy.symbols('x y')
coeffs = [[5, 0, 3], [-1, 2, 0], [1, 0, 0]]
for _ in range(10):
expanded = polynomials.expand_coefficients(coeffs, 5.0, length=10)
polynomial = polynomials.coefficients_to_polynomial(expanded, [x, y])
sympified = sympy.sympify(polynomial)
self.assertEqual(sympified, x*x + 2*x*y + 3*y*y - x + 5)
def testAddCoefficients(self):
# Add x**2 + 2*y and 3*x + 4*y**3.
coeffs1 = [[0, 2], [0, 0], [1, 0]]
coeffs2 = [[0, 0, 0, 4], [3, 0, 0, 0]]
target = [[0, 2, 0, 4], [3, 0, 0, 0], [1, 0, 0, 0]]
actual = polynomials.add_coefficients(coeffs1, coeffs2)
self.assertAllEqual(target, actual)
def testCoefficientsLinearSplit(self):
for degree in range(3):
for ndims in range(3):
for _ in range(10):
coefficients = np.random.randint(-5, 5, [degree + 1] * ndims)
entropy = random.uniform(1, 4)
c1, c2, coeffs1, coeffs2 = polynomials.coefficients_linear_split(
coefficients, entropy)
c1 = int(c1)
c2 = int(c2)
coeffs1 = np.asarray(coeffs1, dtype=np.int32)
coeffs2 = np.asarray(coeffs2, dtype=np.int32)
sum_ = c1 * coeffs1 + c2 * coeffs2
self.assertAllEqual(sum_, coefficients)
def testSampleWithBrackets(self):
x, y = sympy.symbols('x y')
for _ in range(100):
degrees = np.random.randint(1, 4, [2])
entropy = random.uniform(0, 4)
polynomial = polynomials.sample_with_brackets(
variables=[x, y], degrees=degrees, entropy=entropy)
self.assertIn('(', str(polynomial))
poly = sympy.poly(sympy.sympify(polynomial).expand())
self.assertEqual(poly.degree(x), degrees[0])
self.assertEqual(poly.degree(y), degrees[1])
def testTrim(self):
self.assertAllEqual(polynomials.trim([1]), [1])
self.assertAllEqual(polynomials.trim([1, 0]), [1])
self.assertAllEqual(polynomials.trim([0, 1]), [0, 1])
self.assertAllEqual(polynomials.trim([0]), [])
self.assertAllEqual(polynomials.trim([0, 0]), [])
def testDifferentiate_univariate(self):
coeffs = [5, 3, 2]
expected = [3, 4]
actual = polynomials.differentiate(coeffs, 0)
self.assertAllEqual(expected, actual)
def testDifferentiate_multivariate(self):
coeffs = [[0, 3, 1], [5, 0, 0], [0, 2, 0]]
expected = [[5, 0], [0, 4]]
actual = polynomials.differentiate(coeffs, 0)
self.assertAllEqual(expected, actual)
def testIntegrate_univariate(self):
coeffs = [5, 3, 2]
expected = [0, 5, sympy.Rational(3, 2), sympy.Rational(2, 3)]
actual = polynomials.integrate(coeffs, 0)
self.assertAllEqual(expected, actual)
def testIntegrate_multivariate(self):
coeffs = [[0, 1], [1, 0]]
expected = [[0, 0, sympy.Rational(1, 2)], [0, 1, 0]]
actual = polynomials.integrate(coeffs, 1)
self.assertAllEqual(expected, actual)
if __name__ == '__main__':
tf.test.main()
| mathematics_dataset-master | mathematics_dataset/sample/polynomials_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.sample.arithmetic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from mathematics_dataset.sample import arithmetic
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from six.moves import range
import sympy
class ArithmeticTest(parameterized.TestCase):
def testArithmetic(self):
for _ in range(1000):
target = number.integer_or_rational(4, signed=True)
entropy = 8.0
expression = arithmetic.arithmetic(target, entropy)
self.assertEqual(sympy.sympify(expression), target)
def testArithmeticLength(self):
"""Tests that the generated arithmetic expressions have given length."""
for _ in range(1000):
target = number.integer_or_rational(4, signed=True)
entropy = 8.0
length = random.randint(2, 10)
expression = arithmetic.arithmetic(target, entropy, length)
# Note: actual length is #ops = #numbers - 1.
actual_length = len(ops.number_constants(expression)) - 1
self.assertEqual(actual_length, length)
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/sample/arithmetic_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arithmetic, e.g., "calculate 2+3"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import arithmetic
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
_ENTROPY_EXTRAPOLATE = (10, 12)
_ADD_SUB_ENTROPY_TRAIN = (4, 16)
_ADD_SUB_ENTROPY_INTERPOLATE = (12, 12)
_ADD_SUB_ENTROPY_EXTRAPOLATE = (16, 20)
# In arithmetic expressions:
_EXTRAPOLATE_EXTRA_LENGTH = 3
_INT = 'int'
_INT_OR_RATIONAL = 'rational'
def _make_modules(entropy, add_sub_entropy):
"""Returns modules given "difficulty" parameters."""
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
add_sub_sample_args_pure = composition.PreSampleArgs(1, 1, *add_sub_entropy)
# TODO(b/124039105): consider composed modules?
return {
# Addition and subtraction of integers (and decimals)
'add_or_sub': functools.partial(
add_or_sub, None, add_sub_sample_args_pure),
'add_sub_multiple': functools.partial(
add_sub_multiple, _INT, sample_args_pure),
'add_or_sub_in_base': functools.partial(
add_or_sub_in_base, sample_args_pure),
# Multiplication and division
'mul': functools.partial(mul, None, sample_args_pure),
'div': functools.partial(div, None, sample_args_pure),
'mul_div_multiple': functools.partial(
mul_div_multiple, _INT_OR_RATIONAL, sample_args_pure),
# All together!
'mixed': functools.partial(mixed, _INT_OR_RATIONAL, sample_args_pure),
# And some other arithmetic-related stuff.
'nearest_integer_root': functools.partial(
nearest_integer_root, sample_args_pure),
'simplify_surd': functools.partial(simplify_surd, None, sample_args_pure),
}
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(
entropy=entropy_fn(_ENTROPY_TRAIN),
add_sub_entropy=entropy_fn(_ADD_SUB_ENTROPY_TRAIN))
def test():
"""Returns dict of testing modules."""
return _make_modules(
entropy=_ENTROPY_INTERPOLATE,
add_sub_entropy=_ADD_SUB_ENTROPY_INTERPOLATE)
def test_extra():
"""Returns dict of extrapolation testing modules."""
sample_args_pure = composition.PreSampleArgs(1, 1, *_ENTROPY_EXTRAPOLATE)
add_sub_sample_args_pure = composition.PreSampleArgs(
1, 1, *_ADD_SUB_ENTROPY_EXTRAPOLATE)
train_length = arithmetic.length_range_for_entropy(_ENTROPY_TRAIN[1])[1]
def extrapolate_length():
return random.randint(
train_length + 1, train_length + _EXTRAPOLATE_EXTRA_LENGTH)
def add_sub_multiple_longer():
return add_sub_multiple(_INT, sample_args_pure, length=extrapolate_length())
def mul_div_multiple_longer():
return mul_div_multiple(_INT, sample_args_pure, length=extrapolate_length())
def mixed_longer():
return mixed(_INT, sample_args_pure, length=extrapolate_length())
return {
'add_or_sub_big': functools.partial(
add_or_sub, None, add_sub_sample_args_pure),
'mul_big': functools.partial(mul, None, sample_args_pure),
'div_big': functools.partial(div, None, sample_args_pure),
'add_sub_multiple_longer': add_sub_multiple_longer,
'mul_div_multiple_longer': mul_div_multiple_longer,
'mixed_longer': mixed_longer,
}
def _value_sampler(value):
"""Returns sampler (e.g., number.integer) appropriate for `value`."""
if value == _INT or number.is_integer(value):
return functools.partial(number.integer, signed=True)
if value == _INT_OR_RATIONAL or isinstance(value, sympy.Rational):
return functools.partial(number.integer_or_rational, signed=True)
if isinstance(value, display.Decimal):
return functools.partial(number.integer_or_decimal, signed=True)
raise ValueError('Unrecognized value {} of type {}'
.format(value, type(value)))
def _add_question_or_entity(context, p, q, is_question):
"""Generates entity or question for adding p + q."""
value = p.value + q.value
if is_question:
template = random.choice([
'{p} + {q}',
'{p}+{q}',
'Work out {p} + {q}.',
'Add {p} and {q}.',
'Put together {p} and {q}.',
'Sum {p} and {q}.',
'Total of {p} and {q}.',
'Add together {p} and {q}.',
'What is {p} plus {q}?',
'Calculate {p} + {q}.',
'What is {p} + {q}?',
])
return example.Problem(
question=example.question(context, template, p=p, q=q),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
description='Let {self} = {p} + {q}.',
p=p, q=q)
def _sub_question_or_entity(context, p, q, is_question):
"""Generates entity or question for subtraction p - q."""
value = p.value - q.value
if is_question:
templates = [
'{p} - {q}',
'Work out {p} - {q}.',
'What is {p} minus {q}?',
'What is {p} take away {q}?',
'What is {q} less than {p}?',
'Subtract {q} from {p}.',
'Calculate {p} - {q}.',
'What is {p} - {q}?',
]
if sympy.Ge(p.value, q.value):
# We calculate p - q, so the difference (|p - q|) is the correct answer.
for adjective in ['distance', 'difference']:
for pair in ['{p} and {q}', '{q} and {p}']:
templates.append('What is the {} between {}?'.format(adjective, pair))
template = random.choice(templates)
return example.Problem(
question=example.question(context, template, p=p, q=q),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
description='Let {self} = {p} - {q}.',
p=p, q=q)
def _entropy_for_pair(entropy):
entropy_1 = max(1, random.uniform(0, entropy))
entropy_2 = max(1, entropy - entropy_1)
return entropy_1, entropy_2
@composition.module(number.is_integer_or_rational_or_decimal)
def add_or_sub(value, sample_args, context=None):
"""Module for adding or subtracting two values."""
is_question = context is None
if context is None:
context = composition.Context()
is_addition = random.choice([False, True])
entropy, sample_args = sample_args.peel()
if value is None:
entropy_p, entropy_q = _entropy_for_pair(entropy)
p = number.integer_or_decimal(entropy_p, signed=True)
q = number.integer_or_decimal(entropy_q, signed=True)
else:
entropy = max(entropy, number.entropy_of_value(value))
sampler = _value_sampler(value)
p = sampler(entropy)
if is_addition:
q = value - p
# Maybe swap for symmetry.
if random.choice([False, True]):
p, q = q, p
else:
q = p - value
# Maybe swap for symmetry.
if random.choice([False, True]):
p, q = -q, -p
p, q = context.sample(sample_args, [p, q])
if is_addition:
return _add_question_or_entity(context, p, q, is_question)
else:
return _sub_question_or_entity(context, p, q, is_question)
def add_or_sub_in_base(sample_args):
"""Module for addition and subtraction in another base."""
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy_p, entropy_q = _entropy_for_pair(entropy)
p = number.integer(entropy_p, signed=True)
q = number.integer(entropy_q, signed=True)
base = random.randint(2, 16)
if random.choice([False, True]):
answer = p + q
template = 'In base {base}, what is {p} + {q}?'
else:
answer = p - q
template = 'In base {base}, what is {p} - {q}?'
return example.Problem(
question=example.question(
context,
template,
base=base,
p=display.NumberInBase(p, base),
q=display.NumberInBase(q, base)),
answer=display.NumberInBase(answer, base))
def mul(value, sample_args, context=None):
"""Returns random question for multiplying two numbers."""
del value # unused
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy_p, entropy_q = _entropy_for_pair(entropy)
p = number.integer_or_decimal(entropy_p, True)
q = number.integer_or_decimal(entropy_q, True)
p, q = context.sample(sample_args, [p, q])
answer = p.value * q.value
if is_question:
templates = [
'{p}' + ops.MUL_SYMBOL + '{q}',
'{p} ' + ops.MUL_SYMBOL + ' {q}',
'Calculate {p}' + ops.MUL_SYMBOL + '{q}.',
'Work out {p} ' + ops.MUL_SYMBOL + ' {q}.',
'Multiply {p} and {q}.',
'Product of {p} and {q}.',
'What is the product of {p} and {q}?',
'{p} times {q}',
'What is {p} times {q}?',
]
template = random.choice(templates)
return example.Problem(
question=example.question(context, template, p=p, q=q),
answer=answer
)
else:
return composition.Entity(
context=context,
value=answer,
description='Let {self} = {p} * {q}.',
p=p, q=q)
def div(value, sample_args, context=None):
"""Returns random question for dividing two numbers."""
del value # unused
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy_1, entropy_q = _entropy_for_pair(entropy)
q = number.integer(entropy_q, True, min_abs=1)
if random.choice([False, True]):
# Pick p/q with nice integer result.
answer = number.integer(entropy_1, True)
p = answer * q
else:
p = number.integer(entropy_1, True)
answer = p / q
p, q = context.sample(sample_args, [p, q])
if is_question:
template = random.choice([
'Divide {p} by {q}.',
'{p} divided by {q}',
'What is {p} divided by {q}?',
'Calculate {p} divided by {q}.',
])
return example.Problem(
question=example.question(context, template, p=p, q=q),
answer=answer
)
else:
return composition.Entity(
context=context,
value=answer,
description='Let {self} be {p} divided by {q}.',
p=p, q=q)
def nearest_integer_root(sample_args):
"""E.g., "Calculate the cube root of 35 to the nearest integer."."""
context = composition.Context()
  # With at least 50% probability, pick square or cube root (these are the most
  # important roots!).
if random.choice([False, True]):
one_over_exponent = random.randint(2, 3)
else:
one_over_exponent = random.randint(2, 10)
entropy, sample_args = sample_args.peel()
value = number.integer(entropy, signed=False)
answer = int(round(value ** (1 / one_over_exponent)))
templates = [
'What is {value} to the power of 1/{one_over_exponent}, to the nearest'
' integer?',
]
  if one_over_exponent != 2:  # "What is the second root of 4?" never used.
    templates += [
        'What is the {ordinal} root of {value} to the nearest integer?',
    ]
if one_over_exponent == 2:
templates += [
'What is the square root of {value} to the nearest integer?',
]
elif one_over_exponent == 3:
templates += [
'What is the cube root of {value} to the nearest integer?',
]
template = random.choice(templates)
ordinal = display.StringOrdinal(one_over_exponent)
return example.Problem(
question=example.question(
context, template, value=value, ordinal=ordinal,
one_over_exponent=one_over_exponent),
answer=answer)
def _calculate(value, sample_args, context, add_sub, mul_div, length=None):
"""Questions for evaluating arithmetic expressions."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value in [_INT, _INT_OR_RATIONAL]:
value_entropy = max(1.0, entropy / 4)
entropy = max(1.0, entropy - value_entropy)
sampler = _value_sampler(value)
value = sampler(value_entropy)
op = arithmetic.arithmetic(
value=value, entropy=entropy, add_sub=add_sub, mul_div=mul_div,
length=length)
context.sample_by_replacing_constants(sample_args, op)
if is_question:
template = random.choice([
'{op}',
'What is {op}?',
'Evaluate {op}.',
'Calculate {op}.',
'What is the value of {op}?',
])
return example.Problem(
question=example.question(context, template, op=op),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
expression=op,
description='Let {self} be {op}.',
op=op)
def add_sub_multiple(value, sample_args, length=None):
return _calculate(
value, sample_args, None, add_sub=True, mul_div=False, length=length)
def mul_div_multiple(value, sample_args, length=None):
return _calculate(
value, sample_args, None, add_sub=False, mul_div=True, length=length)
@composition.module(number.is_integer_or_rational)
def mixed(value, sample_args, context=None, length=None):
return _calculate(
value, sample_args, context, add_sub=True, mul_div=True, length=length)
def _surd_coefficients(sympy_exp):
"""Extracts coefficients a, b, where sympy_exp = a + b * sqrt(base)."""
sympy_exp = sympy.simplify(sympy.expand(sympy_exp))
def extract_b(b_sqrt_base):
"""Returns b from expression of form b * sqrt(base)."""
if isinstance(b_sqrt_base, sympy.Pow):
      # The expression is just sqrt(base), i.e., b = 1.
return 1
else:
assert isinstance(b_sqrt_base, sympy.Mul)
assert len(b_sqrt_base.args) == 2
assert b_sqrt_base.args[0].is_rational
assert isinstance(b_sqrt_base.args[1], sympy.Pow) # should be sqrt.
return b_sqrt_base.args[0]
if sympy_exp.is_rational:
# Form: a.
return sympy_exp, 0
elif isinstance(sympy_exp, sympy.Add):
# Form: a + b * sqrt(base)
assert len(sympy_exp.args) == 2
assert sympy_exp.args[0].is_rational
a = sympy_exp.args[0]
b = extract_b(sympy_exp.args[1])
return a, b
else:
# Form: b * sqrt(base).
return 0, extract_b(sympy_exp)
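# For instance, _surd_coefficients(sympy.sympify('2 + 5*sqrt(3)')) should
# return (2, 5), and _surd_coefficients(sympy.sympify('sqrt(3)')) returns
# (0, 1).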
def _surd_split_entropy_two(entropy):
entropy_left = entropy / 2
if entropy_left < 1:
entropy_left = 0
entropy_right = entropy - entropy_left
if random.choice([False, True]):
entropy_left, entropy_right = entropy_right, entropy_left
return entropy_left, entropy_right
def _sample_surd(base, entropy, max_power, multiples_only):
"""An expression that can be reduced to a + b * sqrt(base).
For example, if base=3, then the following are valid expressions:
* sqrt(12) (reduces to 2 * sqrt(3))
* sqrt(3) - 10 * sqrt(3) (reduces to -9 * sqrt(3))
* sqrt(15) / sqrt(5) (reduces to sqrt(3)).
* 4 * sqrt(3) / 2
* 2 + sqrt(3)
  * 1 / (1 + sqrt(3)) (reduces to -1/2 + (1/2) * sqrt(3))
  However, when `multiples_only=True`, an expression like 1 + 2 * sqrt(3) is
  not valid, as it does not reduce to the form b * sqrt(3).
Args:
base: The value inside the square root.
entropy: Float >= 0; used for randomness.
max_power: Integer >= 1; the max power used in expressions. If 1 then
disables.
multiples_only: Whether the surd should be an integer multiple of
sqrt(base).
Returns:
Instance of `ops.Op`.
"""
if entropy <= 0:
return ops.Sqrt(base)
def add_or_sub_():
# Add or subtract two such types.
entropy_left, entropy_right = _surd_split_entropy_two(entropy)
left = _sample_surd(base, entropy_left, max_power, multiples_only)
right = _sample_surd(base, entropy_right, max_power, multiples_only)
op = random.choice([ops.Add, ops.Sub])
return op(left, right)
def mul_by_integer():
entropy_k = min(1, entropy)
left = number.integer(entropy_k, signed=True, min_abs=1)
right = _sample_surd(base, entropy - entropy_k, max_power, multiples_only)
if random.choice([False, True]):
left, right = right, left
return ops.Mul(left, right)
def div_by_sqrt_k():
"""Do sqrt(k * base) / sqrt(k)."""
entropy_k = min(1, entropy)
k = number.integer(entropy_k, signed=False, min_abs=2)
entropy_left, entropy_right = _surd_split_entropy_two(entropy - entropy_k)
k_base_expr = _sample_surd(k * base, entropy_left, max_power, True)
while True:
k_expr = _sample_surd(k, entropy_right, max_power, True)
if k_expr.sympy() != 0:
break
return ops.Div(k_base_expr, k_expr)
def square_k():
"""Do sqrt(k * k * base)."""
entropy_k = min(1, entropy)
k = number.integer(entropy_k, signed=False, min_abs=2)
return _sample_surd(
k * k * base, entropy - entropy_k, max_power, multiples_only)
def surd_plus_integer():
"""Do surd + integer."""
entropy_k = min(1, entropy)
left = number.integer(entropy_k, signed=True)
assert not multiples_only
right = _sample_surd(base, entropy - entropy_k, max_power, False)
if random.choice([True, False]):
left, right = right, left
return ops.Add(left, right)
def power():
"""Do surd**2."""
assert not multiples_only
surd = _sample_surd(base, entropy, max_power=1, multiples_only=False)
return ops.Pow(surd, 2)
choices = [add_or_sub_, mul_by_integer]
if not multiples_only:
choices += [surd_plus_integer]
if max_power > 1:
choices += [power]
if base < 64: # prevent value inside sqrt from getting too big
choices += [div_by_sqrt_k, square_k]
which = random.choice(choices)
return which()
def simplify_surd(value, sample_args, context=None):
"""E.g., "Simplify (2 + 5*sqrt(3))**2."."""
del value # unused
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
while True:
base = random.randint(2, 20)
if sympy.Integer(base).is_prime:
break
num_primes_less_than_20 = 8
entropy -= math.log10(num_primes_less_than_20)
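  # Choosing the base uniformly among the 8 primes below 20 already contributes
  # about log10(8) to the randomness of the question, hence the deduction above.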
exp = _sample_surd(base, entropy, max_power=2, multiples_only=False)
simplified = sympy.expand(sympy.simplify(exp))
template = random.choice([
'Simplify {exp}.',
])
return example.Problem(
question=example.question(context, template, exp=exp),
answer=simplified)
| mathematics_dataset-master | mathematics_dataset/modules/arithmetic.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algebra-related questions, e.g., "Solve 1 + x = 2."."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import linear_system
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.sample import polynomials
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import numpy as np
from six.moves import range
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
_ENTROPY_EXTRAPOLATE = (12, 12)
# In generating a polynomial with real roots (where the roots are generated
# sequentially), this is the probability of taking a previous root, thus giving
# at least one repeated root, rather than sampling a new number. The value is
# somewhat arbitrary, but gives a "medium probability" of seeing a repeated root
# for lowish degree polynomials.
_POLY_PROBABILITY_REPEATED_ROOT = 0.2
def _make_modules(entropy):
"""Returns modules given "difficulty" parameters."""
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
sample_args_composed = composition.PreSampleArgs(2, 4, *entropy)
return {
# Solving equations:
'polynomial_roots': functools.partial(
polynomial_roots, None, sample_args_pure),
'polynomial_roots_composed': functools.partial(
polynomial_roots, None, sample_args_composed),
'linear_1d': functools.partial(
solve_linear_1d, None, sample_args_pure),
'linear_1d_composed': functools.partial(
solve_linear_1d, None, sample_args_composed),
'linear_2d': functools.partial(
solve_linear_2d, None, sample_args_pure),
'linear_2d_composed': functools.partial(
solve_linear_2d, None, sample_args_composed),
# Sequences:
'sequence_next_term': functools.partial(sequence_next_term, *entropy),
'sequence_nth_term': functools.partial(sequence_nth_term, *entropy),
}
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(entropy_fn(_ENTROPY_TRAIN))
def test():
"""Returns dict of testing modules."""
return _make_modules(_ENTROPY_INTERPOLATE)
def test_extra():
"""Returns dict of extrapolation testing modules."""
sample_args_pure = composition.PreSampleArgs(1, 1, *_ENTROPY_EXTRAPOLATE)
return {
'polynomial_roots_big': functools.partial(
polynomial_roots, None, sample_args_pure),
}
def _sample_roots(entropy):
"""Generates `num_distinct + num_repeated` polynomial roots."""
num_roots = random.randint(2, 5)
num_repeated = np.random.binomial(
num_roots - 1, _POLY_PROBABILITY_REPEATED_ROOT)
# Slight hack: don't allow all the roots to be repeated when the entropy is
# high, as this can create very large coefficients.
if entropy > 4:
num_repeated = min(num_repeated, int(num_roots / 2))
num_distinct = num_roots - num_repeated
entropies = entropy * np.random.dirichlet(np.ones(num_distinct))
roots = []
for root_entropy in entropies:
# Generates a root with small probability of being rational.
# (Otherwise when we multiply out the denominators, we get really large
# coefficients in our polynomial.)
if random.random() < 0.1:
root = number.non_integer_rational(root_entropy, True)
else:
root = number.integer(root_entropy, True)
roots.append(root)
for _ in range(num_repeated):
roots.append(random.choice(roots[:num_distinct]))
return roots
def _polynomial_coeffs_with_roots(roots, scale_entropy):
"""Returns a polynomial with the given roots.
The polynomial is generated by expanding product_{root in roots} (x - root),
  and then (1) scaling the coefficients so they are all integers (by multiplying
  through by the lcm of their denominators), and then (2) further scaling the
  coefficients by a random integer or rational with `scale_entropy` digits.
Args:
roots: List of values.
scale_entropy: Float; entropy of the random coefficient scaling.
Returns:
List of coefficients `coeffs`, such that `coeffs[i]` is the coefficient of
variable ** i.
"""
variable = sympy.Symbol('x') # doesn't matter, only use coefficients
polynomial = sympy.Poly(sympy.prod([variable - root for root in roots]))
coeffs_reversed = polynomial.all_coeffs()
assert len(coeffs_reversed) == len(roots) + 1
coeffs = list(reversed(coeffs_reversed))
  # Multiply through by the lcm of the denominators to make the coefficients
  # integers, then possibly rescale by a random (integer or rational) factor.
lcm = sympy.lcm([sympy.denom(coeff) for coeff in coeffs])
if scale_entropy > 0:
while True:
scale = number.integer_or_rational(scale_entropy, signed=True)
if scale != 0:
break
else:
scale = 1
return [coeff * scale * lcm for coeff in coeffs]
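# Worked example (with scale = 1 for simplicity): roots [2, 1/3] expand to
# (x - 2)*(x - 1/3) = x**2 - (7/3)*x + 2/3, i.e. coefficients [2/3, -7/3, 1];
# multiplying through by the lcm of the denominators (3) gives [2, -7, 3].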
def polynomial_roots(value, sample_args, context=None):
"""E.g., "Solve 2*x**2 - 18 = 0."."""
del value # not currently used
# is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
scale_entropy = min(entropy / 2, 1)
roots = _sample_roots(entropy - scale_entropy)
solutions = sorted(list(sympy.FiniteSet(*roots)))
coeffs = _polynomial_coeffs_with_roots(roots, scale_entropy)
(polynomial_entity,) = context.sample(
sample_args, [composition.Polynomial(coeffs)])
if random.choice([False, True]):
# Ask for explicit roots.
if len(solutions) == 1:
answer = solutions[0]
else:
answer = display.NumberList(solutions)
if polynomial_entity.has_expression():
equality = ops.Eq(polynomial_entity.expression, 0)
variable = polynomial_entity.polynomial_variables[0]
else:
variable = sympy.Symbol(context.pop())
equality = ops.Eq(polynomial_entity.handle.apply(variable), 0)
template = random.choice([
'Let {equality}. What is {variable}?',
'Let {equality}. Calculate {variable}.',
'Suppose {equality}. What is {variable}?',
'Suppose {equality}. Calculate {variable}.',
'What is {variable} in {equality}?',
'Solve {equality} for {variable}.',
'Find {variable} such that {equality}.',
'Find {variable}, given that {equality}.',
'Determine {variable} so that {equality}.',
'Determine {variable}, given that {equality}.',
'Solve {equality}.'
])
return example.Problem(
question=example.question(
context, template, equality=equality, variable=variable),
answer=answer)
else:
if polynomial_entity.has_expression():
expression = polynomial_entity.expression
variable = polynomial_entity.polynomial_variables[0]
else:
variable = sympy.Symbol(context.pop())
expression = polynomial_entity.handle.apply(variable)
factored = sympy.factor(
polynomials.coefficients_to_polynomial(coeffs, variable))
template = random.choice([
'Factor {expression}.',
])
return example.Problem(
question=example.question(context, template, expression=expression),
answer=factored)
def _solve_linear_system(degree, value, sample_args, context=None):
"""Solve linear equations."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
solutions = []
if value is not None:
solutions.append(value)
extra_solutions_needed = degree - len(solutions)
if extra_solutions_needed > 0:
entropies = (entropy / 4) * np.random.dirichlet(
np.ones(extra_solutions_needed))
entropies = np.maximum(1, entropies) # min per-solution entropy
entropy -= sum(entropies)
solutions += [number.integer(solution_entropy, True)
for solution_entropy in entropies]
entropy = max(1, entropy)
variables = [sympy.Symbol(context.pop()) for _ in range(degree)]
solution_index = 0
# If we're going to be creating a linear system with constants to replace by
# handles from other modules, then we need a linear system with constants
# occurring. Very occasionally this can fail to happen, e.g., "x = -x";
# normally this while loop will only see one iteration.
while True:
equations = linear_system.linear_system(
variables=variables, solutions=solutions, entropy=entropy,
non_trivial_in=solution_index)
constants = ops.number_constants(equations)
if sample_args.num_modules <= 1 or constants:
break
context.sample_by_replacing_constants(sample_args, equations)
variable = variables[solution_index]
answer = solutions[solution_index]
equations = ', '.join([str(equation) for equation in equations])
if is_question:
template = random.choice([
'Solve {equations} for {variable}.',
])
return example.Problem(
example.question(
context, template, equations=equations,
variable=variable),
answer)
else:
return composition.Entity(
context=context,
value=answer,
description='Suppose {equations}.',
handle=variable,
equations=equations)
@composition.module(number.is_integer)
def solve_linear_1d(*args, **kwargs):
return _solve_linear_system(1, *args, **kwargs)
@composition.module(number.is_integer)
def solve_linear_2d(*args, **kwargs):
return _solve_linear_system(2, *args, **kwargs)
class _PolynomialSequence(object):
"""A sequence given by a polynomial."""
def __init__(self, variable, entropy, min_degree=1, max_degree=3):
"""Initializes a random polynomial sequence.
Args:
variable: Variable to use.
entropy: Entropy for polynomial coefficients.
min_degree: Minimum order of polynomial.
max_degree: Maximum order of polynomial.
"""
self._degree = random.randint(min_degree, max_degree)
self._variable = variable
polynomial = polynomials.sample_with_small_evaluation(
variable=self._variable, degree=self._degree,
max_abs_input=self._degree + 2, entropy=entropy)
self._sympy = polynomial.sympy()
@property
def min_num_terms(self):
"""Returns the minimum number of terms to identify the sequence.
This assumes a human-like prior over types of sequences.
Returns:
Integer >= 1.
"""
return self._degree + 2
@property
def sympy(self):
return self._sympy
def term(self, n):
"""Returns the `n`th term of the sequence."""
return self._sympy.subs(self._variable, n)
def sequence_next_term(min_entropy, max_entropy):
"""E.g., "What is the next term in the sequence 1, 2, 3?"."""
entropy = random.uniform(min_entropy, max_entropy)
context = composition.Context()
variable = sympy.Symbol(context.pop())
sequence = _PolynomialSequence(variable, entropy)
min_num_terms = sequence.min_num_terms
num_terms = random.randint(min_num_terms, min_num_terms + 3)
sequence_sample = [sequence.term(n + 1) for n in range(num_terms)]
sequence_sample = display.NumberList(sequence_sample)
template = random.choice([
'What is next in {sequence}?',
'What comes next: {sequence}?',
'What is the next term in {sequence}?',
])
answer = sequence.term(num_terms + 1)
return example.Problem(
question=example.question(context, template, sequence=sequence_sample),
answer=answer)
def sequence_nth_term(min_entropy, max_entropy):
"""E.g., "What is the nth term in the sequence 1, 2, 3?"."""
entropy = random.uniform(min_entropy, max_entropy)
context = composition.Context()
variable = sympy.Symbol(context.pop())
sequence = _PolynomialSequence(variable, entropy)
min_num_terms = sequence.min_num_terms
num_terms = random.randint(min_num_terms, min_num_terms + 3)
sequence_sample = [sequence.term(n + 1) for n in range(num_terms)]
sequence_sample = display.NumberList(sequence_sample)
template = random.choice([
'What is the {variable}\'th term of {sequence}?',
])
answer = sequence.sympy
return example.Problem(
question=example.question(
context, template, variable=variable, sequence=sequence_sample),
answer=answer)
| mathematics_dataset-master | mathematics_dataset/modules/algebra.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measurement questions, e.g., "How many hours are there in a day?"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.modules import train_test_split
from mathematics_dataset.sample import number
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import six
import sympy
def _make_modules(is_train):
"""Returns modules, with split based on the boolean `is_train`."""
return {
'conversion': functools.partial(
conversion, is_train=is_train, is_extrapolation=False),
'time': functools.partial(time, is_train=is_train),
}
def train(entropy_fn):
"""Returns dict of training modules."""
del entropy_fn # unused
return _make_modules(is_train=True)
def test():
"""Returns dict of testing modules."""
return _make_modules(is_train=False)
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {
'conversion': functools.partial(
conversion, is_train=False, is_extrapolation=True),
}
Unit = collections.namedtuple('Unit', ('name', 'symbol'))
MICRO_SYMBOL = 'u'
LENGTH = {
Unit('meter', 'm'): 1,
Unit('kilometer', 'km'): 1000,
Unit('centimeter', 'cm'): sympy.Rational(1, 100),
Unit('millimeter', 'mm'): sympy.Rational(1, 1000),
Unit('micrometer', 'um'): sympy.Rational(1, 1e6),
Unit('nanometer', 'nm'): sympy.Rational(1, 1e9),
}
TIME = {
Unit('second', 's'): 1,
Unit('minute', None): 60,
Unit('hour', None): 60*60,
Unit('day', None): 24*60*60,
Unit('week', None): 7*24*60*60,
Unit('millisecond', 'ms'): sympy.Rational(1, 1e3),
Unit('microsecond', MICRO_SYMBOL + 's'): sympy.Rational(1, 1e6),
Unit('nanosecond', 'ns'): sympy.Rational(1, 1e9),
}
TIME_YEARLY = {
Unit('year', None): 1,
Unit('decade', None): 10,
Unit('century', None): 100,
Unit('millennium', None): 1000,
Unit('month', None): sympy.Rational(1, 12),
}
MASS = {
Unit('kilogram', 'kg'): 1, # Yes, the *kilo*gram is the SI base unit.
Unit('tonne', 't'): 1000,
Unit('gram', 'g'): sympy.Rational(1, 1e3),
Unit('milligram', 'mg'): sympy.Rational(1, 1e6),
Unit('microgram', MICRO_SYMBOL + 'g'): sympy.Rational(1, 1e9),
Unit('nanogram', 'ng'): sympy.Rational(1, 1e12),
}
VOLUME = {
Unit('litre', 'l'): 1,
Unit('millilitre', 'ml'): sympy.Rational(1, 1000),
}
DIMENSIONS = [LENGTH, TIME, TIME_YEARLY, MASS, VOLUME]
def pluralize(name):
if name == 'century':
return 'centuries'
if name == 'millennium':
return 'millennia'
return name + 's'
def _factor_non_decimal(value):
"""Extras x dividing value such that x is coprime to 2 and 5."""
result = 1
factors = sympy.factorint(value)
for factor, power in six.iteritems(factors):
if factor not in [2, 5]:
result *= factor ** power
return result
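# Rough worked example for _factor_non_decimal (illustrative values only):
#   _factor_non_decimal(350)   # 350 = 2 * 5**2 * 7, so this returns 7.
#   _factor_non_decimal(1000)  # 1000 = 2**3 * 5**3, so this returns 1.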
def _sample_conversion_decimal(dimension, is_extrapolation):
"""Samples to and from units and values."""
base_unit, target_unit = random.sample(list(dimension.keys()), 2)
scale = sympy.Rational(dimension[base_unit]) / dimension[target_unit]
scale_non_decimal = _factor_non_decimal(sympy.denom(scale))
entropy = 9 if is_extrapolation else 7
base_value = number.non_integer_decimal(entropy, signed=False)
base_value = display.Decimal(base_value.value * scale_non_decimal)
target_value = display.Decimal(base_value.value * scale)
return base_value, base_unit, target_value, target_unit
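# Sketch of why scale_non_decimal is used (illustrative units): converting
# minutes to hours has scale = 60 / 3600 = 1/60, whose denominator 60 contains
# the non-decimal factor 3. Multiplying the sampled decimal x by 3 ensures the
# target value 3 * x / 60 = x / 20 is still a terminating decimal.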
def _conversion_decimal(context, is_train, is_extrapolation):
"""E.g., "How many grams are in 5kg?"."""
dimension = random.choice(DIMENSIONS)
while True:
base_value, base_unit, target_value, target_unit = (
_sample_conversion_decimal(dimension, is_extrapolation))
if train_test_split.is_train(base_value) == is_train:
break
templates = [
'How many {target_name} are there in {base_value} {base_name}?',
'What is {base_value} {base_name} in {target_name}?',
'Convert {base_value} {base_name} to {target_name}.',
]
if base_unit.symbol is not None:
templates += [
'How many {target_name} are there in {base_value}{base_symbol}?',
'What is {base_value}{base_symbol} in {target_name}?',
'Convert {base_value}{base_symbol} to {target_name}.',
]
template = random.choice(templates)
base_name = pluralize(base_unit.name)
target_name = pluralize(target_unit.name)
question = example.question(
context,
template,
base_name=base_name,
base_symbol=base_unit.symbol,
base_value=base_value,
target_name=target_name)
return example.Problem(question=question, answer=target_value)
def _conversion_fraction(context, is_train):
"""E.g., "How many grams are in three quarters of a kg?"."""
dimension = random.choice(DIMENSIONS)
# Limit probability of giving zero answer.
allow_zero = random.random() < 0.2
# Repeat until we find a pair with an integral answer. (Avoids ambiguity with
# decimals.)
while True:
base_unit, target_unit = random.sample(list(dimension.keys()), 2)
base_value = number.non_integer_rational(2, signed=False)
if train_test_split.is_train(base_value) != is_train:
continue
answer = (base_value * sympy.Rational(dimension[base_unit])
/ sympy.Rational(dimension[target_unit]))
if (abs(answer) <= 100000
and sympy.denom(answer) == 1
and (allow_zero or answer != 0)):
break
template = random.choice([
'How many {target_name} are there in {base_value} of a {base_name}?',
'What is {base_value} of a {base_name} in {target_name}?',
])
if sympy.denom(base_value) > 20 or random.choice([False, True]):
base_value_string = base_value # Will be represented as e.g., 2/3.
else:
base_value_string = display.StringNumber(base_value) # e.g., two thirds
question = example.question(
context, template,
base_name=base_unit.name,
base_value=base_value_string,
target_name=pluralize(target_unit.name))
return example.Problem(question=question, answer=answer)
def conversion(is_train, is_extrapolation):
"""Conversion question, in decimal or fraction."""
context = composition.Context()
# TODO(b/124038528): implement extrapolation for fraction conversions too
if is_extrapolation or random.choice([False, True]):
return _conversion_decimal(
context, is_train=is_train, is_extrapolation=is_extrapolation)
else:
return _conversion_fraction(context, is_train=is_train)
def time(is_train):
"""Questions for calculating start, end, or time differences."""
context = composition.Context()
start_minutes = random.randint(1, 24*60 - 1)
while True:
duration_minutes = random.randint(1, 12*60 - 1)
if train_test_split.is_train(duration_minutes) == is_train:
break
end_minutes = start_minutes + duration_minutes
def format_12hr(minutes):
"""Format minutes from midnight in 12 hr format."""
hours = (minutes // 60) % 24
minutes %= 60
am_pm = 'AM' if hours < 12 else 'PM'
hours = (hours - 1) % 12 + 1
return '{}:{:02} {}'.format(hours, minutes, am_pm)
start = format_12hr(start_minutes)
end = format_12hr(end_minutes)
which_question = random.randint(0, 3)
if which_question == 0:
# Question: What is start = end - duration?
template = random.choice([
'What is {duration} minutes before {end}?',
])
return example.Problem(
question=example.question(
context, template, duration=duration_minutes, end=end),
answer=start)
elif which_question == 1:
# Question: What is end = start + duration?
template = random.choice([
'What is {duration} minutes after {start}?',
])
return example.Problem(
question=example.question(
context, template, duration=duration_minutes, start=start),
answer=end)
else:
# Question: What is duration = end - start?
template = random.choice([
'How many minutes are there between {start} and {end}?',
])
return example.Problem(
question=example.question(context, template, start=start, end=end),
answer=duration_minutes)
| mathematics_dataset-master | mathematics_dataset/modules/measurement.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mathematics_dataset-master | mathematics_dataset/modules/__init__.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Number-related questions, e.g., "write seventy-two as a number"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import number
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import numpy as np
import six
from six.moves import range
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
_ENTROPY_EXTRAPOLATE = (12, 12)
# Range for the number of modules composed together in train/test and
# extrapolation data.
_NUM_MODULES_COMPOSED = [2, 4]
def _make_modules(entropy, num_modules_composed):
"""Returns modules given "difficulty" parameters."""
fns = {
'gcd': gcd,
'lcm': lcm,
'div_remainder': div_remainder,
'is_prime': is_prime,
'is_factor': is_factor,
'round_number': round_number,
'place_value': place_value,
'list_prime_factors': list_prime_factors,
}
# These modules don't have both pure and composed.
modules = {
'base_conversion': functools.partial(base_conversion, *entropy),
}
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
sample_args_composed = composition.PreSampleArgs(
num_modules_composed[0], num_modules_composed[1], *entropy)
for name, module in six.iteritems(fns):
modules[name] = functools.partial(module, None, sample_args_pure)
modules[name + '_composed'] = functools.partial(
module, None, sample_args_composed)
return modules
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(
entropy=entropy_fn(_ENTROPY_TRAIN),
num_modules_composed=_NUM_MODULES_COMPOSED)
def test():
"""Returns dict of testing modules."""
return _make_modules(
entropy=_ENTROPY_INTERPOLATE,
num_modules_composed=_NUM_MODULES_COMPOSED)
def test_extra():
"""Returns dict of extrapolation testing modules."""
sample_args_pure = composition.PreSampleArgs(1, 1, *_ENTROPY_EXTRAPOLATE)
return {
'round_number_big': functools.partial(
round_number, None, sample_args_pure),
'place_value_big': functools.partial(place_value, None, sample_args_pure),
}
def place_value(value, sample_args, context=None):
"""E.g., "Q: What is the tens digit of 31859? A: 5."""
del value # unused for now
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
integer = number.integer(entropy, signed=False, min_abs=1)
(entity,) = context.sample(sample_args, [integer])
integer_as_string = str(integer)
num_digits = len(integer_as_string)
firsts = ['', 'ten ', 'hundred ']
seconds = [
'thousands', 'millions', 'billions', 'trillions', 'quadrillions',
'quintillions', 'sextillions', 'septillions', 'octillions', 'nonillions',
'decillions',
]
place_names = ['units', 'tens', 'hundreds']
for second in seconds:
for first in firsts:
place_names.append(first + second)
place = random.randint(1, num_digits) # 1 = units, 2 = tens, etc.
place_name = place_names[place - 1]
answer = sympy.Integer(integer_as_string[num_digits - place])
return example.Problem(
question=example.question(
context,
'What is the {place_name} digit of {integer}?',
place_name=place_name, integer=entity.expression_else_handle),
answer=answer)
# TODO(b/124040078): add to composition system?
def round_number(value, sample_args, context=None):
"""Question for rounding integers and decimals."""
del value # unused for now
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
# This is the power of 10 to round to. E.g., power == 0 corresponds to
# rounding to the nearest integer; power == -2 corresponds to rounding to two
# decimal places, and power == 3 corresponds to rounding to the nearest 1000.
power = random.randint(-7, 6)
answer_entropy = 1 + random.uniform(0, entropy / 2)
entropy = max(1, entropy - answer_entropy)
value_integer = number.integer(answer_entropy, signed=True)
remainder_divisor = 10 ** int(math.ceil(entropy))
remainder_range_lower = -remainder_divisor / 2
remainder_range_upper = remainder_divisor / 2
if value_integer <= 0:
remainder_range_lower += 1
if value_integer >= 0:
remainder_range_upper -= 1
remainder = random.randint(remainder_range_lower, remainder_range_upper)
input_ = value_integer + sympy.Rational(remainder, remainder_divisor)
scale = 10**power if power >= 0 else sympy.Rational(1, 10**(-power))
input_ = input_ * scale
value = value_integer * scale
if not number.is_integer(input_):
input_ = display.Decimal(input_)
if not number.is_integer(value):
value = display.Decimal(value)
(input_,) = context.sample(sample_args, [input_])
if power > 0:
# Rounding to a power of ten.
round_to = 10**power
if random.choice([False, True]):
# Write the rounding value as a word instead.
round_to = display.StringNumber(round_to,
join_number_words_with_hyphens=False)
description = 'the nearest {round_to}'.format(round_to=round_to)
elif power == 0 and random.choice([False, True]):
# Round to nearest integer.
description = 'the nearest integer'
else:
# Round to decimal places.
description = random.choice(['{dps} decimal place', '{dps} dp'])
if power != -1:
# Plural
description += 's'
dps = -power
if random.choice([False, True]):
dps = display.StringNumber(dps)
description = description.format(dps=dps)
template = random.choice([
'Round {input} to {description}.',
'What is {input} rounded to {description}?',
])
return example.Problem(
question=example.question(
context, template, input=input_, description=description),
answer=value)
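# Worked example of the `power` convention above (hypothetical draw): with
# power == -2 the question asks for "2 decimal places" and with power == 3 it
# asks for "the nearest 1000"; in both cases `value` is, by construction, the
# correctly rounded form of `input_`.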
def _semi_prime(entropy):
"""Generates a semi-prime with the given entropy."""
# Add on extra entropy to account for the sparsity of the primes; we don't
# actually use the integers sampled, but rather a random prime close to them;
# thus some entropy is lost, which we must account for
entropy += math.log10(max(1, entropy * math.log(10)))
  # We intentionally uniformly sample the "entropy" (i.e., the approximate
  # number of digits) of the two factors.
entropy_1, entropy_2 = entropy * np.random.dirichlet([1, 1])
  # Need >= 2 for randprime to always work (Bertrand's postulate).
approx_1 = number.integer(entropy_1, signed=False, min_abs=2)
approx_2 = number.integer(entropy_2, signed=False, min_abs=2)
factor_1 = sympy.ntheory.generate.randprime(approx_1 / 2, approx_1 * 2)
factor_2 = sympy.ntheory.generate.randprime(approx_2 / 2, approx_2 * 2)
return factor_1 * factor_2
def is_prime(value, sample_args, context=None):
"""Questions asking about primality."""
del value # unused for now
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
composite = _semi_prime(entropy)
if random.choice([False, True]):
# Use the composite
integer = composite
is_prime_ = False
else:
# Take the next prime after the composite, to ensure the same distribution
# as composites. Do "composite - 4" so we occasionally see "2" as a prime.
integer = sympy.ntheory.generate.nextprime(composite - 4)
is_prime_ = True
(integer_entity,) = context.sample(sample_args, [integer])
if random.choice([False, True]) and integer != 1:
answer = not is_prime_
attribute_name = random.choice(['composite', 'a composite number'])
else:
answer = is_prime_
attribute_name = random.choice(['prime', 'a prime number'])
return example.Problem(
question=example.question(
context, 'Is {integer} {attribute}?',
integer=integer_entity.expression_else_handle,
attribute=attribute_name),
answer=answer)
def is_factor(value, sample_args, context=None):
"""E.g., "Is 5 a factor of 48?"."""
del value # unused
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy_factor = 1 + random.uniform(0, entropy/3)
entropy = max(0, entropy - entropy_factor)
maybe_factor = number.integer(entropy_factor, False, min_abs=2)
integer = maybe_factor * number.integer(entropy, False, min_abs=1)
# Produce balanced classes.
if random.choice([False, True]):
# The following makes it not a factor.
integer += random.randint(1, maybe_factor - 1)
(entity,) = context.sample(sample_args, [integer])
templates = [
'Is {maybe_factor} a factor of {value}?',
'Is {value} a multiple of {maybe_factor}?',
'Does {maybe_factor} divide {value}?',
]
if maybe_factor == 2:
templates += [
'Is {value} even?',
]
template = random.choice(templates)
answer = integer % maybe_factor == 0
return example.Problem(
question=example.question(
context, template, maybe_factor=maybe_factor,
value=entity.expression_else_handle),
answer=answer)
def list_prime_factors(value, sample_args, context=None):
"""E.g., "What are the prime factors of 36?"."""
del value # unused for now
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy = max(1, entropy)
integer = number.integer(entropy, signed=False, min_abs=2)
(entity,) = context.sample(sample_args, [integer])
prime_factors = sorted(sympy.factorint(integer).keys())
template = random.choice([
'What are the prime factors of {integer}?',
'List the prime factors of {integer}.',
])
return example.Problem(
question=example.question(
context, template, integer=entity.expression_else_handle),
answer=display.NumberList(prime_factors))
def _pair_with_large_hidden_factor(entropy):
"""Returns pair of numbers with possibly large common factor hidden."""
entropy_p, entropy_q, _ = entropy * np.random.dirichlet([1, 1, 1])
# Min entropy on p and q to minimize trivial solutions.
entropy_p = max(1, entropy_p)
entropy_q = max(1, entropy_q)
entropy_mult = max(0, entropy - entropy_p - entropy_q)
p = number.integer(entropy_p, False, min_abs=1)
q = number.integer(entropy_q, False, min_abs=1)
mult = number.integer(entropy_mult, False, min_abs=1)
p *= mult
q *= mult
return p, q
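# Illustrative draw (hypothetical values): p = 3, q = 5, mult = 4 gives the
# pair (12, 20), whose greatest common divisor 4 is "hidden" in both numbers.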
def lcm(value, sample_args, context=None):
"""Question for least common multiple of p and q."""
del value # unused
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
p, q = _pair_with_large_hidden_factor(entropy)
answer = sympy.lcm(p, q)
if random.choice([False, True]):
p, q = context.sample(sample_args, [p, q])
# Ask the question directly.
adjective = random.choice(['least', 'lowest', 'smallest'])
template = random.choice([
'Calculate the {adjective} common multiple of {p} and {q}.',
'What is the {adjective} common multiple of {p} and {q}?',
])
return example.Problem(
question=example.question(
context, template, adjective=adjective, p=p.expression_else_handle,
q=q.expression_else_handle),
answer=answer)
else:
# Phrase the question as finding the common denominator of two fractions.
p = number.integer(2, signed=True, coprime_to=p) / p
q = number.integer(2, signed=True, coprime_to=q) / q
p, q = context.sample(sample_args, [p, q])
template = random.choice([
'What is the common denominator of {p} and {q}?',
'Find the common denominator of {p} and {q}.',
'Calculate the common denominator of {p} and {q}.',
])
return example.Problem(
question=example.question(
context, template, p=p.expression_else_handle,
q=q.expression_else_handle),
answer=answer)
def _random_coprime_pair(entropy):
"""Returns a pair of random coprime integers."""
coprime_product = number.integer(entropy, False, min_abs=1)
factors = sympy.factorint(coprime_product)
def take():
prime = random.choice(list(factors.keys()))
power = factors[prime]
del factors[prime]
return prime ** power
if random.random() < 0.8 and len(factors) >= 2:
# Disallow trivial factoring where possible.
count_left = random.randint(1, len(factors) - 1)
count_right = len(factors) - count_left
else:
count_left = random.randint(0, len(factors))
count_right = len(factors) - count_left
left = sympy.prod([take() for _ in range(count_left)])
right = sympy.prod([take() for _ in range(count_right)])
assert left * right == coprime_product
return left, right
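# Illustrative split (hypothetical values): coprime_product = 360 = 2**3 * 3**2 * 5
# could be divided into left = 8 and right = 45, which are coprime and multiply
# back to 360.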
# @composition.module(number.is_positive_integer)
def gcd(value, sample_args, context=None):
"""Question for greatest common divisor of p and q."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value is None:
value_entropy = 1 + random.uniform(0, entropy/3)
entropy = max(1, entropy - value_entropy)
value = number.integer(value_entropy, False, min_abs=1)
p_mult, q_mult = _random_coprime_pair(entropy)
p = value * p_mult
q = value * q_mult
assert sympy.gcd(p, q) == value
p, q = context.sample(sample_args, [p, q])
adjective = (random.choice(['greatest', 'highest']) + ' common '
+ random.choice(['divisor', 'factor']))
if is_question:
template = random.choice([
'Calculate the {adjective} of {p} and {q}.',
'What is the {adjective} of {p} and {q}?',
])
return example.Problem(
question=example.question(
context, template, adjective=adjective, p=p, q=q),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
description='Let {self} be the {adjective} of {p} and {q}.',
adjective=adjective, p=p, q=q)
# @composition.module(number.is_positive_integer)
def div_remainder(value, sample_args, context=None):
"""E.g., "What is the remainder when 27 is divided by 5?"."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value is None:
entropy_value = 1 + random.uniform(0, entropy/3)
entropy = max(0, entropy - entropy_value)
value = number.integer(entropy_value, signed=False)
entropy_a, entropy_q = entropy * np.random.dirichlet([1, 1])
a = number.integer(entropy_a, signed=False, min_abs=1)
q = value + number.integer(entropy_q, signed=False, min_abs=1)
p = a * q + value
assert p % q == value
p, q = context.sample(sample_args, [p, q])
if is_question:
template = random.choice([
'Calculate the remainder when {p} is divided by {q}.',
'What is the remainder when {p} is divided by {q}?',
])
return example.Problem(
question=example.question(
context, template, p=p.expression_else_handle,
q=q.expression_else_handle),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
description='Let {self} be the remainder when {p} is divided by {q}.',
p=p, q=q)
def base_conversion(min_entropy, max_entropy):
"""E.g., "What is 17 base 8 in base 10?"."""
context = composition.Context()
from_base = random.randint(2, 16)
while True:
to_base = random.randint(2, 16)
if to_base != from_base:
break
# Entropy used up in selecting bases.
entropy_used = math.log10(16 * 15)
entropy = random.uniform(
min_entropy - entropy_used, max_entropy - entropy_used)
value = number.integer(entropy, signed=True)
template = random.choice([
'{from_str} (base {from_base}) to base {to_base}',
'Convert {from_str} (base {from_base}) to base {to_base}.',
'What is {from_str} (base {from_base}) in base {to_base}?',
])
return example.Problem(
question=example.question(
context, template,
from_str=display.NumberInBase(value, from_base),
from_base=from_base,
to_base=to_base),
answer=display.NumberInBase(value, to_base))
| mathematics_dataset-master | mathematics_dataset/modules/numbers.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculus related questions, e.g., "differentiate x**2"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import polynomials
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import numpy as np
from six.moves import range
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
def _make_modules(entropy):
"""Returns modules given "difficulty" parameters."""
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
sample_args_composed = composition.PreSampleArgs(2, 4, *entropy)
return {
'differentiate_composed': functools.partial(
differentiate_univariate, None, sample_args_composed),
'differentiate': functools.partial(differentiate, None, sample_args_pure),
}
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(entropy_fn(_ENTROPY_TRAIN))
def test():
"""Returns dict of testing modules."""
return _make_modules(_ENTROPY_INTERPOLATE)
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {
}
def _generate_polynomial(num_variables, entropy, derivative_order,
derivative_axis):
"""Returns polynomial."""
  # Note: numpy's randint excludes its upper bound, unlike python's random.randint.
degrees = np.random.randint(1, 4, [num_variables])
degrees[derivative_axis] = np.random.randint(0, 4) # allow to be zero here.
coefficients = polynomials.sample_coefficients(degrees, entropy)
# We also generate coefficients that will disappear when differentiated.
# Thus we don't account for the entropy used here.
assert derivative_order > 0
degrees[derivative_axis] = derivative_order - 1
extra_coefficients = polynomials.sample_coefficients(degrees, entropy)
return np.concatenate(
[extra_coefficients, coefficients], axis=derivative_axis)
def _template(module_count, derivative_order, num_variables):
"""Selects appropriate template."""
templates = [
'Find the {nth} derivative of {eq} wrt {var}.',
'What is the {nth} derivative of {eq} wrt {var}?',
]
if derivative_order == 1:
templates += [
'Differentiate {eq} with respect to {var}.',
'Differentiate {eq} wrt {var}.',
'What is the derivative of {eq} wrt {var}?',
]
derivative_variable_is_unambiguous = num_variables == 1 and module_count == 1
if derivative_variable_is_unambiguous:
templates += [
'Find the {nth} derivative of {eq}.',
'What is the {nth} derivative of {eq}?',
]
if derivative_order == 1:
templates += [
'Differentiate {eq}.',
'What is the derivative of {eq}?',
]
return random.choice(templates)
def _sample_integrand(coefficients, derivative_order, derivative_axis, entropy):
"""Integrates `coefficients` and adds sampled "constant" terms."""
coefficients = np.asarray(coefficients)
# Integrate (with zero for constant terms).
integrand = coefficients
for _ in range(derivative_order):
integrand = polynomials.integrate(integrand, derivative_axis)
# Add on sampled constant terms.
constant_degrees = np.array(integrand.shape) - 1
constant_degrees[derivative_axis] = derivative_order - 1
extra_coeffs = polynomials.sample_coefficients(constant_degrees, entropy)
pad_amount = coefficients.shape[derivative_axis]
pad = [(0, pad_amount if i == derivative_axis else 0)
for i in range(coefficients.ndim)]
extra_coeffs = np.pad(extra_coeffs, pad, 'constant', constant_values=0)
return integrand + extra_coeffs
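# Rough sketch of the integration step (hypothetical coefficients): integrating
# [2, 3] (i.e., 3*x + 2) once along axis 0 gives [0, 2, 3/2] (i.e., 2*x +
# 1.5*x**2); freshly sampled constant terms are then added in the zero
# positions, and these vanish again on differentiation.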
def _differentiate_polynomial(value, sample_args, context, num_variables):
"""Generates a question for differentiating a polynomial."""
is_question = context is None
if context is None:
context = composition.Context()
if value is not None:
num_variables = value.coefficients.ndim
entropy, sample_args = sample_args.peel()
max_derivative_order = 3
derivative_order = random.randint(1, max_derivative_order)
entropy = max(0, entropy - math.log10(max_derivative_order))
derivative_axis = random.randint(0, num_variables - 1)
if value is None:
coefficients = _generate_polynomial(
num_variables, entropy, derivative_order, derivative_axis)
else:
coefficients = _sample_integrand(
value.coefficients, derivative_order, derivative_axis, entropy)
(entity,) = context.sample(
sample_args, [composition.Polynomial(coefficients)])
value = coefficients
for _ in range(derivative_order):
value = polynomials.differentiate(value, axis=derivative_axis)
nth = display.StringOrdinal(derivative_order)
if entity.has_expression():
polynomial = entity.expression
variables = entity.polynomial_variables
else:
variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]
polynomial = entity.handle.apply(*variables)
variable = variables[derivative_axis]
if is_question:
template = _template(context.module_count, derivative_order, len(variables))
answer = polynomials.coefficients_to_polynomial(value, variables).sympy()
return example.Problem(
question=example.question(
context, template, eq=polynomial, var=variable, nth=nth),
answer=answer)
else:
fn_symbol = context.pop()
variables_string = ', '.join(str(variable) for variable in variables)
    # The description below does not name the differentiation variable, so there
    # must be exactly one.
    assert len(variables) == 1
return composition.Entity(
context=context,
value=composition.Polynomial(value),
description='Let {fn}({variables}) be the {nth} derivative of {eq}.',
handle=composition.FunctionHandle(fn_symbol),
fn=fn_symbol, variables=variables_string, nth=nth, eq=polynomial)
def differentiate_univariate(value, sample_args, context=None):
return _differentiate_polynomial(value, sample_args, context, 1)
@composition.module(composition.is_polynomial)
def differentiate(value, sample_args, context=None):
num_variables = random.randint(1, 4)
return _differentiate_polynomial(value, sample_args, context, num_variables)
| mathematics_dataset-master | mathematics_dataset/modules/calculus.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Polynomial manipulation (adding, composing, finding coefficients, etc)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.sample import polynomials
from mathematics_dataset.util import composition
import numpy as np
from six.moves import range
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
def _make_modules(entropy):
"""Returns modules given "difficulty" parameters."""
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
sample_args_composed = composition.PreSampleArgs(2, 4, *entropy)
sample_args_mixed = composition.PreSampleArgs(1, 4, *entropy)
return {
'coefficient_named':
functools.partial(coefficient_named, None, sample_args_pure),
'evaluate':
functools.partial(evaluate, None, sample_args_pure),
'evaluate_composed':
functools.partial(evaluate, None, sample_args_composed),
# TODO(b/124038948): consider doing pure sample args for 'add'?
'add':
functools.partial(add, None, sample_args_mixed),
'expand':
functools.partial(expand, None, sample_args_pure),
'collect':
functools.partial(collect, None, sample_args_pure),
'compose':
functools.partial(compose, None, sample_args_mixed),
# Rearranging powers:
'simplify_power':
functools.partial(simplify_power, None, sample_args_pure),
}
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(entropy_fn(_ENTROPY_TRAIN))
def test():
"""Returns dict of testing modules."""
return _make_modules(_ENTROPY_INTERPOLATE)
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {
}
def coefficient_named(value, sample_args, context=None):
"""E.g., "Express x^2 + 2x in the form h * x^2 + k * x + t and give h."."""
del value # not used
if context is None:
context = composition.Context()
variable = sympy.Symbol(context.pop())
entropy, sample_args = sample_args.peel()
degree = random.randint(1, 4)
if random.choice([False, True]):
coefficients = polynomials.sample_coefficients(
degree, entropy/2, min_non_zero=random.randint(degree - 1, degree))
expanded = polynomials.expand_coefficients(coefficients, entropy/2)
expression = polynomials.coefficients_to_polynomial(expanded, variable)
else:
expression = polynomials.sample_with_brackets(variable, degree, entropy)
coefficients = list(reversed(sympy.Poly(expression).all_coeffs()))
named_coeffs = [sympy.Symbol(context.pop()) for _ in range(degree + 1)]
canonical = polynomials.coefficients_to_polynomial(named_coeffs, variable)
  # Small probability of asking for a coefficient that may be zero.
  if random.random() < 0.2:
power = random.randint(0, degree)
else:
non_zero_powers = [i for i in range(degree + 1) if coefficients[i] != 0]
power = random.choice(non_zero_powers)
value = coefficients[power]
named_coeff = named_coeffs[power]
template = random.choice([
'Express {expression} as {canonical} and give {target}.',
'Rearrange {expression} to {canonical} and give {target}.',
'Express {expression} in the form {canonical} and give {target}.',
'Rearrange {expression} to the form {canonical} and give {target}.',
])
return example.Problem(
question=example.question(
context, template, expression=expression, canonical=canonical,
target=named_coeff),
answer=value)
_TEMPLATES = [
'What is {composed}?',
'Calculate {composed}.',
'Give {composed}.',
'Determine {composed}.',
]
@composition.module(number.is_integer)
def evaluate(value, sample_args, context=None):
"""Entity for evaluating an integer-valued polynomial at a given point."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value is None:
entropy_value = random.uniform(1, 1 + entropy/3)
entropy = max(0, entropy - entropy_value)
value = number.integer(entropy_value, signed=True)
entropy_input = random.uniform(1, 1 + entropy/3)
entropy = max(0, entropy - entropy_input)
input_ = number.integer(entropy_input, signed=True)
degree = random.randint(1, 3)
entropies = entropy * np.random.dirichlet(list(range(1, degree + 1)))
# Calculate coefficients in reverse order.
target = value
coeffs_reversed = []
for i, coeff_entropy in enumerate(entropies):
power = degree - i
coeff = number.integer(coeff_entropy, signed=True)
if input_ != 0:
coeff += int(round(target / input_ ** power))
if coeff == 0 and i == 0:
# Don't allow zero in leading coefficient.
coeff += random.choice([-1, 1])
coeffs_reversed.append(coeff)
target -= coeff * (input_ ** power)
coeffs_reversed.append(target)
coefficients = list(reversed(coeffs_reversed))
(polynomial_entity, input_) = context.sample(
sample_args, [composition.Polynomial(coefficients), input_])
composed = polynomial_entity.handle.apply(input_.handle)
if is_question:
template = random.choice(_TEMPLATES)
return example.Problem(
question=example.question(context, template, composed=composed),
answer=value)
else:
return composition.Entity(
context=context,
value=value,
expression=composed,
description='Let {self} be {composed}.',
composed=composed)
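# Sketch of the reverse construction above (hypothetical values): with
# value = 7, input_ = 2 and degree 1, a sampled leading coefficient c makes the
# constant term 7 - 2*c, so that c*2 + (7 - 2*c) evaluates back to 7.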
# TODO(b/124039290): merge with compose? both add and compose do similar things.
@composition.module(composition.is_integer_polynomial)
def add(value, sample_args, context=None):
"""E.g., "Let f(x)=2x+1, g(x)=3x+2. What is 5*f(x) - 7*g(x)?"."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value is None:
max_degree = 3
degree = random.randint(1, max_degree)
entropy -= math.log10(max_degree)
entropy_value = entropy / 2
entropy -= entropy_value
value = polynomials.sample_coefficients(
degree, entropy=entropy_value, min_non_zero=random.randint(1, 3))
value = composition.Polynomial(value)
c1, c2, coeffs1, coeffs2 = polynomials.coefficients_linear_split(
value.coefficients, entropy)
coeffs1 = polynomials.trim(coeffs1)
coeffs2 = polynomials.trim(coeffs2)
c1, c2, fn1, fn2 = context.sample(
sample_args,
[c1, c2, composition.Polynomial(coeffs1), composition.Polynomial(coeffs2)]
)
var = sympy.var(context.pop())
expression = (
c1.handle * fn1.handle.apply(var) + c2.handle * fn2.handle.apply(var))
if is_question:
answer = polynomials.coefficients_to_polynomial(value.coefficients, var)
answer = answer.sympy()
template = random.choice(_TEMPLATES)
return example.Problem(
question=example.question(context, template, composed=expression),
answer=answer)
else:
intermediate_symbol = context.pop()
intermediate = sympy.Function(intermediate_symbol)(var)
return composition.Entity(
context=context,
value=value,
description='Let {intermediate} = {composed}.',
handle=composition.FunctionHandle(intermediate_symbol),
intermediate=intermediate,
composed=expression)
def expand(value, sample_args, context=None):
"""E.g., "Expand (x**2 + 1)**2."."""
del value # not used
if context is None:
context = composition.Context()
variable = sympy.Symbol(context.pop())
entropy, sample_args = sample_args.peel()
min_order = 1
max_order = 5
order = random.randint(min_order, max_order)
entropy -= math.log10(max_order - min_order + 1)
expression_ = polynomials.sample_with_brackets(variable, order, entropy)
expanded = sympy.expand(expression_)
template = random.choice([
'Expand {expression}.'
])
return example.Problem(
question=example.question(context, template, expression=expression_),
answer=expanded)
@composition.module(composition.is_polynomial)
def collect(value, sample_args, context=None):
"""Collect terms in an unsimplified polynomial."""
is_question = context is None
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
if value is None:
entropy_value, entropy = entropy * np.random.dirichlet([2, 3])
degrees = [random.randint(1, 3)]
value = composition.Polynomial(
polynomials.sample_coefficients(degrees, entropy_value))
assert isinstance(value, composition.Polynomial)
coefficients = value.coefficients
all_coefficients_are_integer = True
for coeff in coefficients.flat:
if not number.is_integer(coeff):
all_coefficients_are_integer = False
break
if all_coefficients_are_integer:
coefficients = polynomials.expand_coefficients(coefficients, entropy)
else:
# put back the unused entropy
sample_args = composition.SampleArgs(
sample_args.num_modules, sample_args.entropy + entropy)
num_variables = coefficients.ndim
variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]
unsimplified = polynomials.coefficients_to_polynomial(coefficients, variables)
simplified = unsimplified.sympy().expand()
  # Bit of a hack: handle the very rare case where no number constants appear.
if not ops.number_constants(unsimplified):
unsimplified = ops.Add(unsimplified, ops.Constant(0))
context.sample_by_replacing_constants(sample_args, unsimplified)
if is_question:
template = 'Collect the terms in {unsimplified}.'
return example.Problem(
question=example.question(context, template, unsimplified=unsimplified),
answer=simplified)
else:
function_symbol = context.pop()
function = sympy.Function(function_symbol)(*variables)
return composition.Entity(
context=context,
value=value,
handle=composition.FunctionHandle(function_symbol),
expression=unsimplified,
polynomial_variables=variables,
description='Let {function} = {unsimplified}.',
function=function,
unsimplified=unsimplified)
def compose(value, sample_args, context=None):
"""E.g., "Let f(x)=2x+1, let g(x)=3x+10. What is f(g(x))?"."""
del value # unused
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
entropy_f, entropy_g = entropy * np.random.dirichlet([1, 1])
coeffs_f = polynomials.sample_coefficients([random.randint(1, 2)], entropy_f)
coeffs_g = polynomials.sample_coefficients([random.randint(1, 2)], entropy_g)
entity_f, entity_g = context.sample(
sample_args,
[composition.Polynomial(coeffs_f), composition.Polynomial(coeffs_g)])
variable = sympy.var(context.pop())
poly_f = polynomials.coefficients_to_polynomial(coeffs_f, variable)
poly_g = polynomials.coefficients_to_polynomial(coeffs_g, variable)
poly_f_g = poly_f.sympy().subs(variable, poly_g.sympy()).expand()
expression = composition.FunctionHandle(entity_f, entity_g).apply(variable)
template = random.choice(_TEMPLATES)
return example.Problem(
question=example.question(context, template, composed=expression),
answer=poly_f_g)
def simplify_power(value, sample_args, context=None):
"""E.g., "Simplify ((x**2)**3/x**4)**2/x**3."."""
del value # unused
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
variable = sympy.symbols(context.pop(), positive=True)
unsimplified = polynomials.sample_messy_power(variable, entropy)
answer = unsimplified.sympy()
template = random.choice([
'Simplify {unsimplified} assuming {variable} is positive.',
])
return example.Problem(
example.question(
context, template, unsimplified=unsimplified, variable=variable),
answer)
| mathematics_dataset-master | mathematics_dataset/modules/polynomials.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Comparisons, e.g. "is 2 > 3?"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import numpy as np
from six.moves import range
import sympy
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
_ENTROPY_EXTRAPOLATE = (12, 12)
_EXTRAPOLATION_EXTRA_COUNT = 2
_PROB_EQUAL = 0.2
def _make_modules(entropy):
"""Returns modules given "difficulty" parameters."""
sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)
sample_args_composed = composition.PreSampleArgs(2, 4, *entropy)
return {
'pair': functools.partial(pair, sample_args_pure),
'pair_composed': functools.partial(pair, sample_args_composed),
'kth_biggest': functools.partial(kth_biggest, sample_args_pure),
'kth_biggest_composed': functools.partial(
kth_biggest, sample_args_composed),
'closest': functools.partial(closest, sample_args_pure),
'closest_composed': functools.partial(closest, sample_args_composed),
'sort': functools.partial(sort, sample_args_pure),
'sort_composed': functools.partial(sort, sample_args_composed),
}
def train(entropy_fn):
"""Returns dict of training modules."""
return _make_modules(entropy_fn(_ENTROPY_TRAIN))
def test():
"""Returns dict of testing modules."""
return _make_modules(_ENTROPY_INTERPOLATE)
def test_extra():
"""Returns dict of extrapolation testing modules."""
sample_args_pure = composition.PreSampleArgs(1, 1, *_ENTROPY_EXTRAPOLATE)
def sort_count():
lower = _sort_count_range(_ENTROPY_TRAIN[1])[1]
return random.randint(lower + 1, lower + _EXTRAPOLATION_EXTRA_COUNT)
def closest_count():
lower = _closest_count_range(_ENTROPY_TRAIN[1])[1]
return random.randint(lower + 1, lower + _EXTRAPOLATION_EXTRA_COUNT)
def kth_biggest_more():
return kth_biggest(sample_args_pure, count=sort_count())
def sort_more():
return sort(sample_args_pure, count=sort_count())
def closest_more():
return closest(sample_args_pure, count=closest_count())
return {
'kth_biggest_more': kth_biggest_more,
'sort_more': sort_more,
'closest_more': closest_more,
}
def _make_comparison_question(context, left, right):
"""Makes a question for comparing two values."""
if random.choice([False, True]) and sympy.Ne(left.value, right.value):
# Do question of form: "Which is bigger: a or b?".
if random.choice([False, True]):
answer = (
left.handle if sympy.Gt(left.value, right.value) else right.handle)
template = random.choice([
'Which is bigger: {left} or {right}?',
'Which is greater: {left} or {right}?',
])
else:
answer = (
left.handle if sympy.Lt(left.value, right.value) else right.handle)
template = random.choice([
'Which is smaller: {left} or {right}?',
])
return example.Problem(
question=example.question(context, template, left=left, right=right),
answer=answer)
comparisons = {
'<': sympy.Lt,
'<=': sympy.Le,
'>': sympy.Gt,
'>=': sympy.Ge,
'=': sympy.Eq,
'!=': sympy.Ne,
}
templates = {
'<': [
'Is {left} ' + ops.LT_SYMBOL + ' {right}?',
'Is {left} less than {right}?',
'Is {left} smaller than {right}?',
],
'<=': [
'Is {left} ' + ops.LE_SYMBOL + ' {right}?',
'Is {left} less than or equal to {right}?',
'Is {left} at most {right}?',
'Is {left} at most as big as {right}?',
],
'>': [
'Is {left} ' + ops.GT_SYMBOL + ' {right}?',
'Is {left} greater than {right}?',
'Is {left} bigger than {right}?',
],
'>=': [
'Is {left} ' + ops.GE_SYMBOL + ' {right}?',
'Is {left} greater than or equal to {right}?',
'Is {left} at least {right}?',
'Is {left} at least as big as {right}?',
],
'=': [
'Does {left} ' + ops.EQ_SYMBOL + ' {right}?',
'Are {left} and {right} equal?',
'Is {left} equal to {right}?',
'Do {left} and {right} have the same value?',
],
'!=': [
'Is {left} ' + ops.NE_SYMBOL + ' {right}?',
'Is {left} not equal to {right}?',
'Are {left} and {right} unequal?',
'Are {left} and {right} nonequal?',
'Are {left} and {right} non-equal?',
'Do {left} and {right} have different values?',
],
}
comparison = random.choice(list(comparisons.keys()))
template = random.choice(templates[comparison])
question = example.question(context, template, left=left, right=right)
answer = comparisons[comparison](left.value, right.value)
return example.Problem(question=question, answer=answer)
def integer_or_rational_or_decimal(entropy):
if random.choice([False, True]):
return number.integer_or_decimal(entropy, signed=True)
else:
return number.integer_or_rational(entropy, signed=True)
def pair(sample_args, context=None):
"""Compares two numbers, e.g., "is 1/2 < 0.5?"."""
if context is None:
context = composition.Context()
entropy, sample_args = sample_args.peel()
def integers_close():
entropy_diff, entropy_left = entropy * np.random.dirichlet([1, 3])
left = number.integer(entropy_left, True)
right = left + number.integer(entropy_diff, True)
return left, right
def rational_and_integer():
    # Pick a rational, and an integer close to its value.
left = number.non_integer_rational(entropy, True)
right = int(round(left)) + random.randint(-1, 1)
return left, right
def independent():
# Return an independent pair.
entropy_left, entropy_right = entropy * np.random.dirichlet([1, 1])
left = integer_or_rational_or_decimal(entropy_left)
right = integer_or_rational_or_decimal(entropy_right)
return left, right
generator = random.choice([integers_close, rational_and_integer, independent])
left, right = generator()
# maybe swap for symmetry
if random.choice([False, True]):
left, right = right, left
left, right = context.sample(sample_args, [left, right])
return _make_comparison_question(context, left, right)
def _entities_to_list(entities):
entity_dict = {}
values_template = ''
for i, entity in enumerate(entities):
if i > 0:
values_template += ', '
entity_name = 'entity_{}'.format(i)
entity_dict[entity_name] = entity
values_template += '{' + entity_name + '}'
return entity_dict, values_template
def _entities_to_choices(entities, answer):
"""Generate a multichoice question template."""
if len(entities) > 26:
raise ValueError('Too many choices: {}'.format(len(entities)))
entity_dict = {}
choices_template = ''
answer_choice = None
for i, entity in enumerate(entities):
choices_template += ' '
entity_name = 'entity_{}'.format(i)
entity_dict[entity_name] = entity
letter = chr(ord('a') + i)
choices_template += '({letter}) {{{entity_name}}}'.format(
letter=letter, entity_name=entity_name)
if entity is answer:
assert answer_choice is None
answer_choice = letter
assert answer_choice is not None
return entity_dict, choices_template, answer_choice
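# Illustrative output (hypothetical entities): with three entities where the
# second one is the answer, _entities_to_list yields the template
# '{entity_0}, {entity_1}, {entity_2}', while _entities_to_choices yields
# ' (a) {entity_0} (b) {entity_1} (c) {entity_2}' with answer_choice 'b'.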
def _mark_choice_letters_used(count, context):
"""Marks the choice letters as used."""
for i in range(count):
context.mark_used(chr(ord('a') + i))
def _kth_biggest_list_question(context, entities, adjective, answer):
"""Ask for the biggest (or smallest, or second biggest, etc) in a list."""
entity_dict, values_template = _entities_to_list(entities)
question = example.question(
context, 'What is the {adjective} value in ' + values_template + '?',
adjective=adjective, **entity_dict)
return example.Problem(question=question, answer=answer.handle)
def _kth_biggest_multichoice_question(context, entities, adjective, answer):
"""Ask for the biggest (or smallest, or second biggest, etc) of choices."""
entity_dict, choices_template, answer_choice = _entities_to_choices(
entities, answer)
question = example.question(
context, 'Which is the {adjective} value?' + choices_template,
adjective=adjective, **entity_dict)
return example.Problem(question=question, answer=answer_choice)
def _entity_sort_key(entity):
return sympy.default_sort_key(entity.value)
def _sort_count_range(entropy):
min_ = 3
return min_, min_ + int(entropy/2)
def _unique_values(entropy, only_integers=False, count=None):
"""Generates unique values."""
if count is None:
count = random.randint(*_sort_count_range(entropy))
if only_integers:
sampler = functools.partial(number.integer, signed=True)
else:
sampler = integer_or_rational_or_decimal
for _ in range(1000):
entropies = entropy * np.random.dirichlet(np.ones(count))
entropies = np.maximum(1, entropies)
values = [sampler(ent) for ent in entropies]
if len(sympy.FiniteSet(*values)) == len(values):
return values
raise ValueError('Could not generate {} unique values with entropy={}'
.format(count, entropy))
def kth_biggest(sample_args, count=None):
"""Asks for the kth biggest value in a list."""
sample_args = sample_args()
context = composition.Context()
entropy, sample_args = sample_args.peel()
values = _unique_values(entropy, count=count)
count = len(values)
display_multichoice = random.choice([False, True])
if display_multichoice:
_mark_choice_letters_used(count, context)
entities = context.sample(sample_args, values)
sorted_entities = sorted(entities, key=_entity_sort_key)
ordinal = random.randint(1, count)
if random.choice([False, True]):
# Do from biggest.
answer = sorted_entities[-ordinal]
adjective = 'biggest'
else:
# Do from smallest.
answer = sorted_entities[ordinal - 1]
adjective = 'smallest'
if ordinal > 1:
adjective = str(display.StringOrdinal(ordinal)) + ' ' + adjective
if display_multichoice:
return _kth_biggest_multichoice_question(
context=context, entities=entities, adjective=adjective, answer=answer)
else:
return _kth_biggest_list_question(
context=context, entities=entities, adjective=adjective, answer=answer)
def _closest_in_list_question(context, entities, target, adjective, answer):
"""Ask for the closest to a given value in a list."""
entity_dict, values_template = _entities_to_list(entities)
question = example.question(
context,
'What is the {adjective} to {target} in ' + values_template + '?',
adjective=adjective, target=target, **entity_dict)
return example.Problem(question=question, answer=answer.handle)
def _closest_multichoice_question(context, entities, target, adjective, answer):
"""Ask for the closest to a given value in a set of choices."""
entity_dict, choices_template, answer_choice = _entities_to_choices(
entities, answer)
question = example.question(
context,
'Which is the {adjective} to {target}?' + choices_template,
adjective=adjective, target=target, **entity_dict)
return example.Problem(question=question, answer=answer_choice)
def _closest_count_range(entropy):
min_ = 3
return min_, min_ + int(entropy/3)
def closest(sample_args, count=None):
"""Ask for the closest to a given value in a list."""
sample_args = sample_args()
context = composition.Context()
entropy, sample_args = sample_args.peel()
if count is None:
count = random.randint(*_closest_count_range(entropy))
display_multichoice = random.choice([False, True])
if display_multichoice:
_mark_choice_letters_used(count, context)
entropy_target, entropy_list = entropy * np.random.dirichlet([1, count])
target = integer_or_rational_or_decimal(entropy_target)
while True:
value_entropies = entropy_list * np.random.dirichlet(np.ones(count))
value_entropies = np.maximum(1, value_entropies)
values = [integer_or_rational_or_decimal(ent) for ent in value_entropies]
differences = [abs(sympy.sympify(value) - target) for value in values]
if len(sympy.FiniteSet(*differences)) == count: # all differences unique
break
target_and_entities = context.sample(sample_args, [target] + values)
target = target_and_entities[0]
entities = target_and_entities[1:]
min_difference = min(differences)
answer_index = differences.index(min_difference)
answer = entities[answer_index]
adjective = random.choice(['closest', 'nearest'])
if display_multichoice:
return _closest_multichoice_question(
context=context, entities=entities, target=target, adjective=adjective,
answer=answer)
else:
return _closest_in_list_question(
context=context, entities=entities, target=target, adjective=adjective,
answer=answer)
def sort(sample_args, count=None):
"""Ask to sort numbers in increasing or decreasing order."""
sample_args = sample_args()
context = composition.Context()
entropy, sample_args = sample_args.peel()
# Sometimes just integers, to allow for more terms in a short space.
values = _unique_values(
entropy, only_integers=random.choice([False, True]), count=count)
entities = context.sample(sample_args, values)
unsorted_dict, unsorted_template = _entities_to_list(entities)
ascending = random.choice([False, True])
templates = [
'Sort ' + unsorted_template + ' in {direction} order.',
'Put ' + unsorted_template + ' in {direction} order.',
]
if ascending:
templates.append('Sort ' + unsorted_template + '.')
direction = random.choice(['ascending', 'increasing'])
else:
direction = random.choice(['descending', 'decreasing'])
template = random.choice(templates)
sorted_entities = sorted(
entities, key=_entity_sort_key, reverse=(not ascending))
answer = ''
for i, entity in enumerate(sorted_entities):
if i > 0:
answer += ', '
answer += str(entity.handle)
return example.Problem(
question=example.question(
context, template, direction=direction, **unsorted_dict),
answer=answer)
| mathematics_dataset-master | mathematics_dataset/modules/comparison.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.modules.algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.modules import algebra
from mathematics_dataset.sample import polynomials
from six.moves import range
import sympy
class AlgebraTest(absltest.TestCase):
def testPolynomialCoeffsWithRoots(self):
coeffs = algebra._polynomial_coeffs_with_roots([1, 2], scale_entropy=0.0)
self.assertEqual(coeffs, [2, -3, 1])
def testPolynomialRoots(self):
variable = sympy.Symbol('x')
for _ in range(10):
roots = random.sample(list(range(-9, 10)), 3)
coeffs = algebra._polynomial_coeffs_with_roots(roots, scale_entropy=10.0)
polynomial = polynomials.coefficients_to_polynomial(coeffs, variable)
calc_roots = sympy.polys.polytools.real_roots(polynomial)
self.assertEqual(calc_roots, sorted(roots))
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/modules/algebra_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Probability questions (sampling, independence, expectations, ...)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import random
import string
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.modules import train_test_split
from mathematics_dataset.util import combinatorics
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
from mathematics_dataset.util import probability
import numpy as np
from six.moves import range
from six.moves import zip
_LETTERS = string.ascii_lowercase
_MAX_FRAC_TRIVIAL_PROB = 0.1
# Maximum numbers of distinct letters and total letters in a bag.
_MAX_DISTINCT_LETTERS = 6
_MAX_TOTAL_LETTERS = 20
_MAX_LETTER_REPEAT = 10
_SWR_SAMPLE_COUNT = [2, 4]
_SWR_SAMPLE_COUNT_EXTRAPOLATE = [5, 5]
_GERUNDS = {
'pick': 'picking',
}
def _make_modules(is_train):
"""Returns modules, with split based on the boolean `is_train`."""
return {
'swr_p_sequence': functools.partial(
swr_prob_sequence, is_train=is_train, sample_range=_SWR_SAMPLE_COUNT),
'swr_p_level_set': functools.partial(
swr_prob_level_set, is_train=is_train,
sample_range=_SWR_SAMPLE_COUNT),
}
def train(entropy_fn):
"""Returns dict of training modules."""
del entropy_fn # unused
return _make_modules(is_train=True)
def test():
"""Returns dict of testing modules."""
return _make_modules(is_train=False)
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {
'swr_p_sequence_more_samples': functools.partial(
swr_prob_sequence, is_train=None,
sample_range=_SWR_SAMPLE_COUNT_EXTRAPOLATE),
'swr_p_level_set_more_samples': functools.partial(
swr_prob_level_set, is_train=None,
sample_range=_SWR_SAMPLE_COUNT_EXTRAPOLATE),
}
def _sequence_event(values, length, verb):
"""Returns sequence (finite product) event.
Args:
values: List of values to sample from.
length: Length of the sequence to generate.
verb: Verb in infinitive form.
Returns:
Instance of `probability.FiniteProductEvent`, together with a text
description.
"""
del verb # unused
samples = [random.choice(values) for _ in range(length)]
events = [probability.DiscreteEvent([sample]) for sample in samples]
event = probability.FiniteProductEvent(events)
sequence = ''.join(str(sample) for sample in samples)
event_description = 'sequence {sequence}'.format(sequence=sequence)
return event, event_description
def _word_series(words, conjunction='and'):
"""Combines the words using commas and the final conjunction."""
len_words = len(words)
if len_words == 0:
return ''
if len_words == 1:
return words[0]
return '{} {} {}'.format(', '.join(words[:-1]), conjunction, words[-1])
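# Illustrative example (added for clarity; not part of the original module):
# shows the strings `_word_series` produces for typical inputs.
def _word_series_example():
  assert _word_series(['1 a', '2 b', '1 c']) == '1 a, 2 b and 1 c'
  assert _word_series(['1 a', '3 d'], conjunction='or') == '1 a or 3 d'
  assert _word_series(['1 a']) == '1 a'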
def _level_set_event(values, length, verb):
"""Generates `LevelSetEvent`; see _generate_sequence_event."""
counts = combinatorics.uniform_non_negative_integers_with_sum(
len(values), length)
counts_dict = dict(list(zip(values, counts)))
event = probability.CountLevelSetEvent(counts_dict)
shuffled_values = list(values)
random.shuffle(shuffled_values)
counts_and_values = [
'{} {}'.format(counts_dict[value], value)
for value in shuffled_values
if counts_dict[value] > 0
]
counts_and_values = _word_series(counts_and_values)
template = random.choice([
'{verbing} {counts_and_values}',
])
verbing = _GERUNDS[verb]
event_description = template.format(
counts_and_values=counts_and_values, verbing=verbing)
return event, event_description
LetterBag = collections.namedtuple(
'LetterBag',
('weights', 'random_variable', 'letters_distinct', 'bag_contents'))
def _sample_letter_bag(is_train, min_total):
"""Samples a "container of letters" and returns info on it."""
while True:
num_distinct_letters = random.randint(1, _MAX_DISTINCT_LETTERS)
num_letters_total = random.randint(
max(num_distinct_letters, min_total),
min(_MAX_TOTAL_LETTERS, num_distinct_letters * _MAX_LETTER_REPEAT))
letter_counts = combinatorics.uniform_positive_integers_with_sum(
num_distinct_letters, num_letters_total)
# Test/train split.
if (is_train is None
or train_test_split.is_train(sorted(letter_counts)) == is_train):
break
letters_distinct = random.sample(_LETTERS, num_distinct_letters)
weights = {i: 1 for i in range(num_letters_total)}
letters_with_repetition = []
for letter, count in zip(letters_distinct, letter_counts):
letters_with_repetition += [letter] * count
random.shuffle(letters_with_repetition)
random_variable = probability.DiscreteRandomVariable(
{i: letter for i, letter in enumerate(letters_with_repetition)})
if random.choice([False, True]):
bag_contents = ''.join(letters_with_repetition)
else:
letters_and_counts = [
'{}: {}'.format(letter, count)
for letter, count in zip(letters_distinct, letter_counts)]
bag_contents = '{' + ', '.join(letters_and_counts) + '}'
return LetterBag(
weights=weights,
random_variable=random_variable,
letters_distinct=letters_distinct,
bag_contents=bag_contents)
def _swr_space(is_train, sample_range):
"""Returns probability space for sampling without replacement."""
num_sampled = random.randint(*sample_range)
sample = _sample_letter_bag(is_train=is_train, min_total=num_sampled)
space = probability.SampleWithoutReplacementSpace(sample.weights, num_sampled)
random_variable = probability.FiniteProductRandomVariable(
[sample.random_variable] * num_sampled)
random_variable.description = (
str(display.StringNumber(num_sampled))
+ ' letters picked without replacement from '
+ sample.bag_contents)
return sample.letters_distinct, space, random_variable
def _sample_without_replacement_probability_question(
is_train, event_fn, sample_range):
"""Question for prob of some event when sampling without replacement."""
def too_big(event_in_space):
if isinstance(event_in_space, probability.SequenceEvent):
size = len(event_in_space.all_sequences())
else:
assert isinstance(event_in_space, probability.FiniteProductEvent)
size = np.prod([len(event.values) for event in event_in_space.events])
return size > int(2e5)
allow_trivial_prob = random.random() < _MAX_FRAC_TRIVIAL_PROB
while True:
distinct_letters, space, random_variable = _swr_space(
is_train, sample_range)
event, event_description = event_fn(
values=distinct_letters, length=space.n_samples, verb='pick')
event_in_space = random_variable.inverse(event)
if too_big(event_in_space):
continue
answer = space.probability(event_in_space)
if answer not in [0, 1] or allow_trivial_prob:
break
context = composition.Context()
template = random.choice([
'{random_variable_capitalize}. What is prob of {event}?',
'{random_variable_capitalize}. Give prob of {event}.',
'What is prob of {event} when {random_variable}?',
'Calculate prob of {event} when {random_variable}.',
])
question = example.question(
context,
template,
random_variable=random_variable.description,
random_variable_capitalize=(
str(random_variable.description).capitalize()),
event=event_description)
return example.Problem(question, answer)
def swr_prob_sequence(is_train, sample_range):
"""Probability of given sequence when sampling without replacement."""
return _sample_without_replacement_probability_question(
is_train=is_train, event_fn=_sequence_event, sample_range=sample_range)
def swr_prob_level_set(is_train, sample_range):
"""Probability of given level set when sampling without replacement."""
return _sample_without_replacement_probability_question(
is_train=is_train, event_fn=_level_set_event, sample_range=sample_range)
| mathematics_dataset-master | mathematics_dataset/modules/probability.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The various mathematics modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mathematics_dataset.modules import algebra
from mathematics_dataset.modules import arithmetic
from mathematics_dataset.modules import calculus
from mathematics_dataset.modules import comparison
from mathematics_dataset.modules import measurement
from mathematics_dataset.modules import numbers
from mathematics_dataset.modules import polynomials
from mathematics_dataset.modules import probability
import six
all_ = {
'algebra': algebra,
'arithmetic': arithmetic,
'calculus': calculus,
'comparison': comparison,
'measurement': measurement,
'numbers': numbers,
'polynomials': polynomials,
'probability': probability,
}
def train(entropy_fn):
"""Returns dict of training modules."""
return {
name: module.train(entropy_fn) for name, module in six.iteritems(all_)
}
def test():
"""Returns dict of testing modules."""
return {name: module.test() for name, module in six.iteritems(all_)}
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {name: module.test_extra() for name, module in six.iteritems(all_)}
| mathematics_dataset-master | mathematics_dataset/modules/modules.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.modules.calculus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from mathematics_dataset.modules import calculus
import tensorflow as tf
class CalculusTest(tf.test.TestCase):
def testSampleIntegrand(self):
# y + 2*x + 3*x**2
coefficients = [[0, 1], [2, 0], [3, 0]]
derivative_order = 1
derivative_axis = 0
# const + x*y + x**2 + x**3
expected = [[0, 1], [1, 0], [1, 0]]
entropy = 4
result = calculus._sample_integrand(
coefficients, derivative_order, derivative_axis, entropy)
result = result[1:, :] # ignore random constant terms
self.assertAllEqual(result, expected)
if __name__ == '__main__':
tf.test.main()
| mathematics_dataset-master | mathematics_dataset/modules/calculus_test.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for train/test split based on hash value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
def is_train(value):
"""Returns whether `value` should be used in a training question."""
value_as_string = str(value).encode('utf-8')
return int(hashlib.md5(value_as_string).hexdigest(), 16) % 2 == 0
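# Illustrative example (added for clarity; not part of the original module):
# the split is a deterministic function of the value's string representation,
# so the same value always lands in the same split across runs and processes.
def _is_train_example():
  values = [7, 'x**2 - 3', (1, 2, 3)]
  splits = [is_train(value) for value in values]
  assert splits == [is_train(value) for value in values]
  return splits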
| mathematics_dataset-master | mathematics_dataset/modules/train_test_split.py |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.modules.arithmetic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.modules import arithmetic
import sympy
class ArithmeticTest(absltest.TestCase):
def testSurdCoefficients(self):
exp = sympy.sympify('1')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1, 0))
exp = sympy.sympify('1/2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1/2, 0))
exp = sympy.sympify('sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 1))
exp = sympy.sympify('3*sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 3))
exp = sympy.sympify('3*sqrt(5)/2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 3/2))
exp = sympy.sympify('1 + 3 * sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1, 3))
exp = sympy.sympify('1/2 + 3 * sqrt(5) / 2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1/2, 3/2))
exp = sympy.sympify('sqrt(2)/(-1 + 2*sqrt(2))**2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(8/49, 9/49))
if __name__ == '__main__':
absltest.main()
| mathematics_dataset-master | mathematics_dataset/modules/arithmetic_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'dm_aux', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `dm_aux/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='dm_aux',
version=_get_version(),
url='https://github.com/deepmind/dm_aux',
license='Apache 2.0',
author='DeepMind',
description=('AUX: Simple audio signal processing, in JAX!'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='jax audio python machine learning',
packages=find_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
include_package_data=True,
python_requires='>=3.7',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Audio Processing',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| dm_aux-main | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for audio signal processing."""
import chex
import jax.numpy as jnp
def mu_law(wav: chex.Array,
mu: int = 255,
quantize: bool = True) -> chex.Array:
"""mu-law companding with optional quantization to `log2(mu + 1)` bits.
https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
Args:
wav: Input wav signal, whose values should be in the range of [-1, +1].
mu: The compression number. Default to 255.
quantize: Whether to quantize the compressed values to `mu + 1` integer
numbers.
Returns:
mu-law compressed wav.
Raises:
ValueError if `mu` is not positive.
"""
if mu <= 0:
raise ValueError(f'Parameter mu should be positive, got {mu}.')
mu_wav = jnp.sign(wav) * jnp.log(1 + mu * abs(wav)) / jnp.log(1 + mu)
  # Clip `mu_wav` to avoid numerical inaccuracy on hardware accelerators.
mu_wav = jnp.clip(mu_wav, -1.0, 1.0)
if not quantize:
return mu_wav
bins = jnp.linspace(-1, 1, mu + 1, endpoint=True)
q_wav = jnp.digitize(mu_wav, bins=bins, right=True) - (mu + 1) // 2
return q_wav.astype(jnp.int32)
def inv_mu_law(compressed_wav: chex.Array,
mu: int = 255,
quantize: bool = True) -> chex.Array:
"""mu-law expansion.
https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
Args:
compressed_wav: Input compressed signal.
mu: The compression number. Default to 255.
quantize: Whether `compressed_wav` is `log2(mu + 1)`-bit quantized.
Returns:
Mu-law expanded version of `compressed_wav` in [-1, +1].
Raises:
ValueError if `mu` is not positive.
"""
if mu <= 0:
    raise ValueError(f'Parameter mu should be positive, got {mu}.')
compressed_wav = compressed_wav.astype(jnp.float32)
mu_wav = compressed_wav * 2.0 / (mu + 1) if quantize else compressed_wav
return jnp.sign(mu_wav) / mu * (jnp.power(mu + 1, jnp.abs(mu_wav)) - 1)
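# Illustrative sketch (added for clarity; not part of the original module):
# round-trips a ramp signal through 8-bit mu-law companding and expansion.
def _mu_law_roundtrip_example():
  wav = jnp.linspace(-1.0, 1.0, num=16)
  codes = mu_law(wav, mu=255, quantize=True)             # int32 codes
  recovered = inv_mu_law(codes, mu=255, quantize=True)   # back to ~[-1, 1]
  return wav, codes, recovered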
def power_to_db(power: chex.Array,
ref: float = 1.0,
amin: float = 1e-10) -> chex.Array:
"""Converts a power spectrogram to decibel (dB), i.e. `10 * log10(power/ref)`.
Args:
power: Input power.
ref: The reference value to scale the input.
amin: The minimum value for `power` and/or `ref`.
Returns:
Input `power` in dB.
"""
  # Stable version of 10 * log10(power / ref).
log_power = 10.0 * jnp.log10(jnp.maximum(amin, power))
log_power -= 10.0 * jnp.log10(jnp.maximum(amin, ref))
return log_power
def db_to_power(log_power: chex.Array, ref: float = 1.0) -> chex.Array:
"""Converts a spectrogram in dB to its power form.
Equivalent to `ref*10**(log_power/10)`.
Args:
log_power: Input power spectrogram in dB.
ref: The reference value to scale the output.
Returns:
Power spectrogram.
"""
return ref * jnp.power(10.0, 0.1 * log_power)
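# Illustrative sketch (added for clarity; not part of the original module):
# `power_to_db` and `db_to_power` invert each other (up to the `amin` clamp),
# so a power spectrogram survives a round trip through the dB domain.
def _db_roundtrip_example():
  power = jnp.array([1e-3, 1.0, 10.0, 100.0])
  log_power = power_to_db(power)      # [-30., 0., 10., 20.]
  recovered = db_to_power(log_power)  # approximately `power` again
  return log_power, recovered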
def amplitude_to_db(amplitude: chex.Array,
ref: float = 1.0,
amin: float = 1e-5) -> chex.Array:
"""Converts an amplitude spectrogram to decibel (dB).
Equivalent to `power_to_db(amplitude**2)`.
Args:
amplitude: Input amplitude spectrogram.
ref: The reference value to scale the input.
amin: The minimum value for `amplitude` and/or `ref`.
Returns:
Input `amplitude` in dB.
"""
return power_to_db(
jnp.square(amplitude), ref=ref**2, amin=amin**2)
def db_to_amplitude(log_power: chex.Array, ref: float = 1.0) -> chex.Array:
"""Converts a spectrogram in dB to an amplitude spectrogram.
Equivalent to `power_to_db(x) ** 0.5`.
Args:
log_power: Input power spectrogram in dB.
ref: The reference value to scale the output.
Returns:
Amplitude spectrogram.
"""
return jnp.sqrt(db_to_power(log_power, ref=ref**2))
| dm_aux-main | dm_aux/transforms.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_aux.spectral."""
import functools
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
import chex
from dm_aux import spectral
import jax
import librosa
import numpy as np
import scipy
import tensorflow as tf
Pad = spectral.Pad
class SpectralTest(parameterized.TestCase):
def _get_precision(self):
if jax.local_devices()[0].platform == 'tpu':
# We need higher precisions when running on TPUs to benchmark against
# librosa and tf.signal.
return jax.lax.Precision.HIGH
else:
return jax.lax.Precision.DEFAULT
@parameterized.named_parameters(
dict(testcase_name='base_test', data_length=4000, n_fft=320,
hop_length=160, win_length=320, window='hann'),
dict(testcase_name='hamming_window', data_length=4000, n_fft=320,
hop_length=160, win_length=320, window='hamming'),
dict(testcase_name='unequal_n_fft_and_win_length', data_length=4000,
n_fft=320, hop_length=160, win_length=160, window='hann'),
dict(testcase_name='longer_input', data_length=8000, n_fft=320,
hop_length=160, win_length=320, window='hann'),
)
def test_stft_matches_librosa(self, data_length, n_fft, hop_length,
win_length, window):
rng = np.random.default_rng(12345)
# Data
data_np = rng.uniform(-1, 1, data_length).astype(np.float32)
data = jax.device_put(data_np[None, ...])
# Librosa stft matrix
stft_matrix_np = librosa.core.stft(
y=data_np, n_fft=n_fft, hop_length=hop_length, win_length=win_length,
window=window, dtype=np.complex64, center=True).T
# dm-aux stft matrix
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn=window, pad=Pad.BOTH,
precision=self._get_precision(), pad_mode='reflect')
spectral_stft = jax.jit(functools.partial(
spectral.stft, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn=window, pad=Pad.BOTH,
precision=self._get_precision(), pad_mode='reflect'))
stft_matrix_jit = spectral_stft(signal=data)
np.testing.assert_allclose(stft_matrix[0], stft_matrix_np, rtol=1e-3,
atol=1e-3)
np.testing.assert_allclose(stft_matrix_jit[0], stft_matrix_np, rtol=1e-3,
atol=1e-3)
@parameterized.named_parameters(
dict(testcase_name='base_test', data_length=4000, n_fft=320,
hop_length=160, win_length=320),
dict(testcase_name='longer_input', data_length=8000, n_fft=320,
hop_length=160, win_length=320),
dict(testcase_name='bigger_window', data_length=4000, n_fft=640,
hop_length=320, win_length=640),
)
def test_stft_matches_tf_signal(self, data_length, n_fft, hop_length,
win_length):
rng = np.random.default_rng(12345)
# Data
batch_size = 16
data_np = rng.uniform(-1, 1, [batch_size, data_length]).astype(np.float32)
data_tf = tf.convert_to_tensor(data_np)
data = jax.device_put(data_np)
# tensorflow stft matrix
stft_matrix_tf = tf.signal.stft(
data_tf, frame_length=win_length, frame_step=hop_length,
fft_length=n_fft, window_fn=tf.signal.hann_window, pad_end=True)
# dm-aux stft matrix
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn='hann',
precision=self._get_precision(), pad=Pad.END)
# dm-aux stft matrix with jit
spectral_stft = jax.jit(functools.partial(
spectral.stft, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn='hann',
precision=self._get_precision(), pad=Pad.END))
stft_matrix_jit = spectral_stft(signal=data)
np.testing.assert_allclose(stft_matrix, stft_matrix_tf.numpy(), rtol=1e-2,
atol=1e-3)
np.testing.assert_allclose(stft_matrix_jit, stft_matrix_tf.numpy(),
rtol=1e-2, atol=1e-3)
@parameterized.named_parameters(
dict(testcase_name='base_test', data_length=16000, n_fft=320,
hop_length=160, win_length=320),
dict(testcase_name='longer_input', data_length=32000, n_fft=320,
hop_length=160, win_length=320),
dict(testcase_name='bigger_window', data_length=16000, n_fft=640,
hop_length=320, win_length=640),
)
def test_aligned_padding(self, data_length, n_fft, win_length, hop_length):
rng = np.random.default_rng(12345)
# Data
data_np = rng.uniform(-1, 1, (1, data_length))
data = jax.device_put(data_np)
# dm-aux stft matrix
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn='hamming', pad=Pad.ALIGNED,
pad_mode='constant')
self.assertEqual(stft_matrix.shape[1], data_length // hop_length)
@parameterized.named_parameters(
dict(testcase_name='higher_rate', n_fft=320, hop_length=160,
win_length=320, rate=1.3),
dict(testcase_name='lower_rate', n_fft=640, hop_length=320,
win_length=640, rate=0.7),
)
def test_phase_vocoder(self, n_fft, win_length, hop_length, rate):
rng = np.random.default_rng(12345)
data_length = 1600
# Data
data_np = rng.uniform(-1, 1, (1, data_length))
data = jax.device_put(data_np)
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn='hamming', pad=Pad.ALIGNED,
pad_mode='constant')
phase_vocoder_jit = jax.jit(functools.partial(
spectral.phase_vocoder, rate=rate, hop_length=hop_length))
stft_matrix_stretched = phase_vocoder_jit(stft_matrix)
stft_matrix_librosa_stretched = librosa.phase_vocoder(
stft_matrix[0].T, rate=rate, hop_length=hop_length).T[None, ...]
np.testing.assert_allclose(stft_matrix_stretched,
stft_matrix_librosa_stretched, rtol=1e-3)
self.assertEqual(stft_matrix_stretched.shape[1],
np.ceil(stft_matrix.shape[1] / rate))
@parameterized.named_parameters(
dict(testcase_name='base_test', data_length=8000, n_fft=320,
hop_length=160, win_length=320, window='hann'),
dict(testcase_name='hamming_window', data_length=8000, n_fft=320,
hop_length=160, win_length=320, window='hamming'),
dict(testcase_name='shorter_input', data_length=4000, n_fft=320,
hop_length=160, win_length=320, window='hann'),
)
def test_istft_matches_librosa(self, data_length, n_fft, hop_length,
win_length, window):
rng = np.random.default_rng(12345)
data_np = rng.uniform(-1, 1, data_length).astype(np.float32)
data = jax.device_put(data_np[None, ...])
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn=window, pad=Pad.BOTH,
precision=self._get_precision(), pad_mode='reflect')
# Librosa iSTFT
reconst_data_np = librosa.core.istft(
np.array(stft_matrix)[0].T, hop_length=hop_length,
win_length=win_length, window=window, center=True)
# DM-AUX iSTFT
reconst_data = spectral.istft(
stft_matrix=stft_matrix, frame_length=win_length,
frame_step=hop_length, window_fn=window,
precision=self._get_precision(), pad=Pad.BOTH)
np.testing.assert_allclose(reconst_data[0], reconst_data_np, rtol=1e-5,
atol=1e-4)
# Test jit.
istft_jit = jax.jit(functools.partial(
spectral.istft, frame_length=win_length, frame_step=hop_length,
window_fn=window, precision=self._get_precision(), pad=Pad.BOTH))
reconst_data_jit = istft_jit(stft_matrix)
np.testing.assert_allclose(reconst_data, reconst_data_jit, rtol=1e-5,
atol=1e-4)
@parameterized.named_parameters(
dict(testcase_name='hamming_window', data_length=32000, n_fft=2048,
hop_length=1024, win_length=2048, window='hamming', pad=Pad.START),
dict(testcase_name='hann_window', data_length=16000, n_fft=320,
hop_length=160, win_length=320, window='hann', pad=Pad.BOTH),
)
def test_istft_reconstruction(self, data_length, n_fft, hop_length,
win_length, window, pad):
rng = np.random.default_rng(12345)
data_np = rng.uniform(-1, 1, data_length)
data = jax.device_put(data_np[None, ...])
stft_matrix = spectral.stft(
signal=data, n_fft=n_fft, frame_length=win_length,
frame_step=hop_length, window_fn=window, pad=pad,
precision=self._get_precision(), pad_mode='reflect')
reconst_data = spectral.istft(
stft_matrix=stft_matrix, frame_length=win_length, frame_step=hop_length,
window_fn=window, pad=Pad.START, precision=self._get_precision(),
length=data_length)
self.assertTrue(
np.allclose(reconst_data[0], data_np[:reconst_data.size], atol=1e-3))
@parameterized.named_parameters(
dict(testcase_name='base_test', data_length=1600, resample_length=800,
window=None, real=True),
dict(testcase_name='hamming_window', data_length=1600,
resample_length=800, window='hamming', real=True),
dict(testcase_name='complex_input', data_length=1600, resample_length=800,
window=None, real=False),
dict(testcase_name='longer_input', data_length=48000,
resample_length=16000, window=None, real=True),
)
def test_resample(self, data_length, resample_length, window, real):
rng = np.random.default_rng(12345)
data_shape = (2, data_length,)
# Data
if real:
data_np = rng.uniform(-1, 1, data_shape)
else:
data_np = (rng.uniform(-1, 1, data_shape) +
1j * rng.uniform(-1, 1, data_shape))
data = jax.device_put(data_np)
# Test correctness against scipy.
resampled_data = spectral.resample(
data, num=resample_length, axis=1, window=window)
resampled_data_sp = scipy.signal.resample(
data, num=resample_length, axis=1, window=window)
np.testing.assert_allclose(resampled_data, resampled_data_sp, atol=1e-6)
# Test jit.
resample_jit = jax.jit(functools.partial(
spectral.resample, num=resample_length, axis=1, window=window))
resampled_data_jit = resample_jit(data)
np.testing.assert_allclose(resampled_data, resampled_data_jit, atol=1e-6)
@parameterized.named_parameters(
dict(testcase_name='spectrogram', data_length=8000,
spectrogram_type='spectrogram', hop_length=160, win_length=320,
num_features=128),
dict(testcase_name='logmf', data_length=4000,
spectrogram_type='logmf', hop_length=160, win_length=640,
num_features=80),
dict(testcase_name='mfcc', data_length=4000,
spectrogram_type='mfcc', hop_length=320, win_length=640,
num_features=64),
)
def test_spectrogram_matches_tf_signal(self, spectrogram_type, data_length,
hop_length, win_length, num_features):
rng = np.random.default_rng(12345)
batch_size = 16
sample_rate = 16000
lower_edge_hertz = 80.0
upper_edge_hertz = 7600.0
# Data
data_np = rng.uniform(-1, 1, [batch_size, data_length])
data_tf = tf.convert_to_tensor(data_np, dtype=tf.float32)
data = jax.device_put(data_np)
# Tensorflow spectrogram
spectrogram_tf = _extract_spectrogram_tf(
data_tf,
sample_rate,
spectrogram_type,
win_length,
hop_length,
num_features,
lower_edge_hertz,
upper_edge_hertz,
).numpy()
# dm-aux spectrogram
extract_spectrogram = functools.partial(
_extract_spectrogram,
sample_rate=sample_rate,
spectrogram_type=spectrogram_type,
power=1.0,
frame_length=win_length,
frame_step=hop_length,
num_features=num_features,
lower_edge_hertz=lower_edge_hertz,
upper_edge_hertz=upper_edge_hertz,
window_fn='hann',
pad=Pad.END,
precision=self._get_precision())
spectrogram = extract_spectrogram(data)
# dm-aux spectrogram with jit
extract_spectrogram_jit = jax.jit(extract_spectrogram)
spectrogram_jit = extract_spectrogram_jit(waveform=data)
atol = 1e-2 if spectrogram_type != 'logmf' else 1e-1
np.testing.assert_allclose(spectrogram, spectrogram_tf, atol=atol)
np.testing.assert_allclose(spectrogram_jit, spectrogram_tf, atol=atol)
def _extract_spectrogram(
waveform: chex.Array,
sample_rate: int,
spectrogram_type: str,
power: float,
frame_length: int,
frame_step: int,
num_features: int,
lower_edge_hertz: float,
upper_edge_hertz: float,
window_fn: str,
pad: Pad,
precision: Optional[jax.lax.Precision] = None) -> chex.Array:
"""Extracts spectrograms using AUX."""
assert spectrogram_type in ['spectrogram', 'logmf', 'mfcc']
spectrograms = spectral.spectrogram(
waveform=waveform, power=power, frame_length=frame_length,
frame_step=frame_step, num_features=None, window_fn=window_fn,
precision=precision, pad=pad)
if spectrogram_type == 'spectrogram':
return spectrograms[..., :num_features]
log_mel_spectrograms = spectral.mel_spectrogram(
spectrograms=spectrograms,
log_scale=True,
sample_rate=sample_rate,
frame_length=frame_length,
num_features=num_features,
lower_edge_hertz=lower_edge_hertz,
upper_edge_hertz=upper_edge_hertz)
if spectrogram_type == 'logmf':
return log_mel_spectrograms
return spectral.mfcc(log_mel_spectrograms, num_mfcc_features=13)
def _extract_spectrogram_tf(
waveform: tf.Tensor,
sample_rate: int,
spectrogram_type: str,
frame_length: int,
frame_step: int,
num_features: int,
lower_edge_hertz: float,
upper_edge_hertz: float,
) -> tf.Tensor:
"""Extracts spectrograms using TensorFlow."""
# tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms
stfts = tf.signal.stft(waveform,
frame_length=frame_length,
frame_step=frame_step,
fft_length=frame_length,
window_fn=tf.signal.hann_window,
pad_end=True)
spectrograms = tf.abs(stfts)
if spectrogram_type == 'spectrogram':
return spectrograms[..., :num_features]
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1]
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_features, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
if spectrogram_type == 'logmf':
return log_mel_spectrograms
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13]
return mfccs
if __name__ == '__main__':
absltest.main()
| dm_aux-main | dm_aux/spectral_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_aux.transforms."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_aux import transforms
import jax
import librosa
import numpy as np
rng = np.random.default_rng(42)
_SPEC_SHAPE = (96, 64)
_RAND_SPEC = rng.uniform(size=(2,) + _SPEC_SHAPE).astype(np.float32)
class TransformsTest(parameterized.TestCase):
def _test_jit(self, x, jax_fn, tol=1e-4):
jax_fn_jitted = jax.jit(jax_fn)
jax_out = jax_fn(x)
jit_out = jax_fn_jitted(x)
if jax.default_backend() == 'tpu':
      # On TPU we need a looser tolerance due to the use of the bf16 format.
tol = np.sqrt(tol)
np.testing.assert_allclose(jax_out, jit_out, rtol=tol, atol=tol)
@parameterized.parameters(16, 32)
def test_mu_law(self, input_length):
x = np.linspace(-1, 1, num=input_length)
# mu-law without quantization
y_librosa = librosa.mu_compress(x, quantize=False)
y = transforms.mu_law(x, quantize=False)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(y_librosa, y, atol=1e-10, rtol=rtol)
# mu-law with quantization
y_quant_librosa = librosa.mu_compress(x, quantize=True)
y_quant = transforms.mu_law(x, quantize=True)
np.testing.assert_allclose(y_quant_librosa, y_quant)
# Test jit
mu_law = lambda x: transforms.mu_law(x, quantize=False)
mu_law_quant = lambda x: transforms.mu_law(x, quantize=True)
self._test_jit(x, mu_law)
self._test_jit(x, mu_law_quant)
@parameterized.parameters(16, 32)
def test_inv_mu_law(self, input_length):
x = np.linspace(-1, 1, num=input_length)
# inv-mu-law without quantization
y = transforms.mu_law(x, quantize=False)
y_expand_librosa = librosa.mu_expand(y, quantize=False)
y_expand = transforms.inv_mu_law(y, quantize=False)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(
y_expand_librosa, y_expand, atol=1e-10, rtol=rtol)
np.testing.assert_allclose(y_expand, x, atol=1e-10, rtol=rtol)
# inv-mu-law with quantization
y_quant = transforms.mu_law(x, quantize=True)
y_expand_quant_librosa = librosa.mu_expand(y_quant, quantize=True)
y_expand_quant = transforms.inv_mu_law(y_quant, quantize=True)
np.testing.assert_allclose(y_expand_quant_librosa, y_expand_quant,
rtol=1e-5, atol=1e-5)
# Test jit
inv_mu_law = lambda x: transforms.inv_mu_law(x, quantize=False)
inv_mu_law_quant = lambda x: transforms.inv_mu_law(x, quantize=True)
self._test_jit(y, inv_mu_law)
self._test_jit(y_quant, inv_mu_law_quant)
def test_power_to_db(self):
spec = _RAND_SPEC
spec_db_librosa = librosa.power_to_db(spec)
spec_db = transforms.power_to_db(spec)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(spec_db_librosa, spec_db, rtol=rtol)
# Test jit
self._test_jit(spec, transforms.power_to_db)
def test_db_to_power(self):
spec_db = _RAND_SPEC
spec_librosa = librosa.db_to_power(spec_db)
spec = transforms.db_to_power(spec_db)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(spec_librosa, spec, rtol=rtol)
# Test jit
self._test_jit(spec_db, transforms.db_to_power)
def test_amplitude_to_db(self):
spec = _RAND_SPEC + 0.01
spec_db_librosa = librosa.amplitude_to_db(spec)
spec_db = transforms.amplitude_to_db(spec)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(spec_db_librosa, spec_db, rtol=rtol)
# Test jit
self._test_jit(spec, transforms.amplitude_to_db)
def test_db_to_amplitude(self):
spec_db = _RAND_SPEC
spec_librosa = librosa.db_to_amplitude(spec_db)
spec = transforms.db_to_amplitude(spec_db)
rtol = 1E-2 if jax.default_backend() == 'tpu' else 1E-5
np.testing.assert_allclose(spec_librosa, spec, rtol=rtol)
# Test jit
self._test_jit(spec_db, transforms.db_to_power)
if __name__ == '__main__':
absltest.main()
| dm_aux-main | dm_aux/transforms_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AUX public APIs."""
from dm_aux import augment
from dm_aux import spectral
from dm_aux import transforms
__version__ = "0.0.1"
__all__ = (
"augment",
"spectral",
"transforms",
)
| dm_aux-main | dm_aux/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_aux.augment."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_aux import augment
import jax
import jax.numpy as jnp
import numpy as np
rng = np.random.default_rng(12345)
_WAVE_SHAPE = (16000,)
_SPEC_SHAPE = (96, 64)
_RAND_WAVE = rng.uniform(-1., 1., size=(8,) + _WAVE_SHAPE).astype(np.float32)
_RAND_SPEC = rng.uniform(-1., 1., size=(8,) + _SPEC_SHAPE).astype(np.float32)
class AugmentTest(parameterized.TestCase):
def _test_jit(self, audio, jax_fn, tol=1e-4):
jax_fn_jitted = jax.jit(jax_fn)
augmented_jax = jax_fn(audio)
augmented_jit = jax_fn_jitted(audio)
if jax.local_devices()[0].platform == 'tpu':
      # On TPU we need a looser tolerance due to the use of the bf16 format.
tol = np.sqrt(tol)
np.testing.assert_allclose(augmented_jax, augmented_jit, rtol=tol, atol=tol)
@parameterized.named_parameters(('waveform', _RAND_WAVE),
('spectrogram', _RAND_SPEC))
def test_additive_gaussian(self, audio):
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.additive_gaussian(key, x, -50)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self._test_jit(audio, augment_fn)
def test_waveform_masking(self):
waveform = _RAND_WAVE
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.waveform_masking(key, x, 640, 2)
augmented_waveform = augment_fn(waveform)
self.assertNotEqual((augmented_waveform == 0).sum(), 0)
self._test_jit(waveform, augment_fn)
def test_spec_augment(self):
spectrogram = _RAND_SPEC
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.spec_augment(key, x, 640, 2, 4, 2)
augmented_spectrogram = augment_fn(spectrogram)
self.assertNotEqual((augmented_spectrogram == 0).sum(), 0)
self._test_jit(spectrogram, augment_fn)
@parameterized.parameters([dict(audio=_RAND_WAVE), dict(audio=_RAND_SPEC)])
def test_audio_mixing(self, audio):
batch_size = audio.shape[0]
dtype = audio.dtype
key = jax.random.PRNGKey(0)
key1, key2 = jax.random.split(key)
mix_lambda = jax.random.beta(
key1, shape=[batch_size], dtype=dtype, a=5.0, b=2.0)
augment_fn = lambda x: augment.audio_mixing(key2, x, mix_lambda)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self._test_jit(audio, augment_fn)
def test_random_polarity_flipping(self):
audio = _RAND_WAVE
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.random_polarity_flipping(key, x, 1.0)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self.assertEqual((audio + augmented_audio).sum(), 0)
self._test_jit(audio, augment_fn)
@parameterized.named_parameters(('waveform', _RAND_WAVE),
('spectrogram', _RAND_SPEC))
def test_time_jitter(self, audio):
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.time_jitter(key, x, 10)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self.assertFalse(jnp.array_equal(augmented_audio, audio))
self._test_jit(audio, augment_fn)
def test_freq_jitter(self):
audio = _RAND_SPEC
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.freq_jitter(key, x, 10)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self.assertFalse(jnp.array_equal(augmented_audio, audio))
self._test_jit(audio, augment_fn)
@parameterized.named_parameters(('fast', 1.3), ('slow', 0.7))
def test_time_stretch(self, rate):
audio = _RAND_SPEC + 1j * _RAND_SPEC
augment_fn = lambda x: augment.time_stretch(x, rate)
augmented_audio = augment_fn(audio)
self.assertEqual(augmented_audio.shape[1], np.ceil(audio.shape[1] / rate))
self._test_jit(audio, augment_fn, 1e-2)
def test_preemphasis(self):
audio = _RAND_WAVE
augment_fn = lambda x: augment.preemphasis(x, coef=0.97)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self._test_jit(audio, augment_fn)
def test_random_time_warp(self):
audio = _RAND_SPEC
key = jax.random.PRNGKey(0)
augment_fn = lambda x: augment.random_time_warp(key, x, 10)
augmented_audio = augment_fn(audio)
self.assertListEqual(list(audio.shape), list(augmented_audio.shape))
self.assertFalse(jnp.array_equal(augmented_audio, audio))
self._test_jit(audio, augment_fn)
if __name__ == '__main__':
absltest.main()
| dm_aux-main | dm_aux/augment_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Waveform and spectrogram augmentation functions.
Waveform augmentations:
- waveform_masking
- random_polarity_flipping
- preemphasis
Spectrogram augmentations:
- spec_augment
- freq_jitter
- time_stretch
- random_time_warp
Augmentations that can be used for both waveforms and spectrograms:
- additive_gaussian
- audio_mixing
- time_jitter
"""
import functools
from typing import Optional
import chex
from dm_aux import spectral
import jax
import jax.numpy as jnp
import scipy.signal
################################################################################
# Waveform augmentations
################################################################################
def waveform_masking(
key: chex.PRNGKey,
waveform: chex.Array,
max_stripe_length: int,
num_stripes: int) -> chex.Array:
"""Randomly masks stripes along the time dimension of a waveform.
Args:
key: random key.
waveform: input waveform to be augmented of shape [batch_size, time_steps].
max_stripe_length: the length of each mask stripe.
num_stripes: the number of stripes.
Returns:
Augmented waveform.
"""
return _drop_stripes(key, waveform, max_stripe_length, num_stripes, axis=1)
def random_polarity_flipping(
key: chex.PRNGKey,
waveform: chex.Array,
flip_prob: float = 0.5) -> chex.Array:
"""Randomly flips the polarity of the `waveform`.
Args:
key: random key.
waveform: input waveform of shape [batch_size, ...].
flip_prob: the probability of flipping the waveform.
Returns:
Augmented waveform.
"""
batch_size = waveform.shape[0]
num_dims = len(waveform.shape)
shape = [batch_size] + [1] * (num_dims - 1)
sign = (jax.random.uniform(key, shape=shape) > flip_prob).astype(
waveform.dtype)
return (sign * 2 - 1) * waveform
def preemphasis(waveform: chex.Array, coef: float = 0.97) -> chex.Array:
"""Scales up the high frequency components in the `waveform`.
Args:
waveform: Input waveform of shape [..., time_steps].
coef: Pre-emphasis coefficient.
Returns:
Pre-emphasized waveform.
"""
return jnp.concatenate([
waveform[..., :1], waveform[..., 1:] - coef * waveform[..., :-1],
], axis=-1)
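# Illustrative sketch (added for clarity; not part of the original module):
# the first sample is kept as-is and every later sample becomes the first
# difference x[t] - coef * x[t - 1], which boosts high-frequency content.
def _preemphasis_example():
  waveform = jnp.array([[1.0, 1.0, 1.0, 1.0]])
  return preemphasis(waveform, coef=0.97)  # ~[[1.0, 0.03, 0.03, 0.03]]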
################################################################################
# Spectrogram augmentations
################################################################################
def spec_augment(
key: chex.PRNGKey,
spectrogram: chex.Array,
max_time_stripe_length: int,
num_time_stripes: int,
max_freq_stripe_length: int,
num_freq_stripes: int) -> chex.Array:
"""Randomly applies the time and frequency mask stripes to a spectrogram.
Aka, SpecAugment:
Park, D.S., Chan, W., Zhang, Y., Chiu, C.C., Zoph, B., Cubuk, E.D.
and Le, Q.V., 2019. Specaugment: A simple data augmentation method
for automatic speech recognition. arXiv preprint arXiv:1904.08779.
Args:
key: random key.
spectrogram: spectrogram to be augmented of shape [batch_size, time_steps,
num_bins].
max_time_stripe_length: the length of each mask stripe on the time
dimension.
num_time_stripes: the number of time stripes.
max_freq_stripe_length: the length of each mask stripe on the frequency
dimension.
num_freq_stripes: the number of frequency stripes.
Returns:
Augmented spectrogram.
"""
key1, key2 = jax.random.split(key)
spectrogram = _drop_stripes(
key1, spectrogram, max_time_stripe_length, num_time_stripes, axis=1)
return _drop_stripes(
key2, spectrogram, max_freq_stripe_length, num_freq_stripes, axis=2)
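# Illustrative sketch (added for clarity; not part of the original module):
# masks up to two time stripes and two frequency stripes on a batch of
# spectrograms of shape [batch_size, time_steps, num_bins].
def _spec_augment_example():
  key = jax.random.PRNGKey(0)
  spectrogram = jnp.ones((4, 96, 64))
  return spec_augment(
      key, spectrogram,
      max_time_stripe_length=10, num_time_stripes=2,
      max_freq_stripe_length=8, num_freq_stripes=2)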
def freq_jitter(
key: chex.PRNGKey,
spectrogram: chex.Array,
max_amount: int,
pad_mode: Optional[str] = 'constant') -> chex.Array:
"""Randomly jitter the `spectrogram` along the frequency dimension.
Args:
key: random key.
spectrogram: input spectrogram of shape [batch_size, time_steps,
num_freq_bins, ...].
max_amount: max steps of freq jittering.
pad_mode: the mode of `jax.numpy.pad` method. Used to define the values of
the padded part.
Returns:
Augmented spectrogram.
"""
return jax.vmap(functools.partial(
_jitter, max_amount=max_amount, axis=1, pad_mode=pad_mode))(
key=jax.random.split(key, num=spectrogram.shape[0]),
audio=spectrogram)
def time_stretch(
spectrogram: chex.Array,
fixed_rate: float = 1.0,) -> chex.Array:
"""Stretches spectrogram in time without changing the pitch.
Args:
spectrogram: input complex spectrogram.
fixed_rate: rate of time stretch. Default to 1 which means no change.
Returns:
Stretched complex spectrogram.
"""
return spectral.phase_vocoder(spectrogram, fixed_rate)
def random_time_warp(
key: chex.PRNGKey,
spectrogram: chex.Array,
sigma: float = 1.0,
scale: float = 1.0,
taper_off: bool = True) -> chex.Array:
"""Randomly warps a spectrogram along time with a Gaussian displacement field.
Args:
key: random key.
spectrogram: input spectrogram of shape [batch_size, time_steps,
num_freq_bins].
sigma: the standard deviation of the Gaussian displacement field.
scale: the scaling constant for the displacement field.
taper_off: whether to taper off the displacement field.
Returns:
Randomly warped spectrogram.
"""
# Displacement field.
length = spectrogram.shape[1]
field = jax.random.uniform(key, (length,), minval=-1.0, maxval=1.0)
field = jnp.convolve(field, _gaussian_kernel(sigma), mode='same') * scale
if taper_off:
field *= scipy.signal.get_window('hann', length, fftbins=True)
def _warp(x, field):
"""Warps a one-dimensional signal with a given displacement field."""
assert x.shape == field.shape
length = x.shape[0]
coords = jnp.arange(length) + field
return jax.scipy.ndimage.map_coordinates(
x, coords[None], order=1, mode='constant')
# Vmap the warping along the batch and frequency dimension.
return jax.vmap(jax.vmap(
functools.partial(_warp, field=field), in_axes=1, out_axes=1))(
spectrogram)
################################################################################
# Augmentations can be used for both waveforms and spectrograms
################################################################################
def additive_gaussian(
key: chex.PRNGKey,
audio: chex.Array,
noise_level_in_db: chex.Numeric,
) -> chex.Array:
"""Augments the audio with additive white Gaussian noise.
Args:
key: random key.
    audio: input audio (waveform or spectrogram) to be augmented.
noise_level_in_db: the standard deviation of the noise in dB, normalized to
the maximum value in audio.
Returns:
    Augmented audio.
"""
noise = jax.random.normal(key, shape=audio.shape)
noise_level = 10. ** (noise_level_in_db / 20.) * jnp.abs(audio).max()
return audio + noise_level * noise
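# Illustrative sketch (added for clarity; not part of the original module):
# -50 dB of relative noise corresponds to a noise standard deviation of about
# 0.3% of the waveform's peak absolute value.
def _additive_gaussian_example():
  key = jax.random.PRNGKey(0)
  waveform = jnp.sin(jnp.linspace(0.0, 100.0, 16000))[None, :]
  return additive_gaussian(key, waveform, noise_level_in_db=-50)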
def audio_mixing(
key: chex.PRNGKey,
audio: chex.Array,
mix_lambda: chex.Array) -> chex.Array:
r"""Randomly mixes two audio samples from the batch.
Given two samples x1 and x2, the augmented version of x1 is:
\bar{x1} = \lambda * x1 + (1 - \lambda) * x2,
where lambda is a random number in [0, 1].
Originally in:
H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, “mixup: Beyond
empirical risk minimization,” in International Conference on Learning
Representations (ICLR), 2018.
Args:
key: random key.
audio: input to be augmented, whose leading dimension is the batch size.
      If the batch size is 1, the input is returned unchanged.
mix_lambda: the mixing ratio lambda. It can be either a scalar or a vector
of length equal to the batch size of `audio`.
Returns:
Augmented audio.
"""
batch_size = audio.shape[0]
num_dims = len(audio.shape)
if batch_size == 1:
return audio
assert len(mix_lambda.shape) == 1, (
'mix_lambda should be a scalar or a vector, getting a '
f'{len(mix_lambda.shape)}-d array.')
assert len(mix_lambda) == batch_size, (
f'Length of mix_lambda ({len(mix_lambda)}) is not equal to the batch '
f'size ({batch_size})')
mix_lambda = jnp.reshape(mix_lambda, [batch_size] + [1] * (num_dims - 1))
random_inds = jax.random.permutation(key, jnp.arange(batch_size))
return audio * mix_lambda + audio[random_inds, ...] * (1.0 - mix_lambda)
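# Illustrative sketch (added for clarity; not part of the original module):
# mixes each example with a randomly permuted other example in the batch,
# with per-example mixing ratios drawn from a Beta distribution as in mixup.
def _audio_mixing_example():
  key = jax.random.PRNGKey(0)
  lambda_key, mix_key, data_key = jax.random.split(key, 3)
  batch = jax.random.uniform(data_key, (8, 16000), minval=-1.0, maxval=1.0)
  mix_lambda = jax.random.beta(lambda_key, a=5.0, b=2.0, shape=(8,))
  return audio_mixing(mix_key, batch, mix_lambda)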
def time_jitter(
key: chex.PRNGKey,
audio: chex.Array,
max_amount: int,
pad_mode: Optional[str] = 'constant') -> chex.Array:
"""Randomly jitters the `audio` along the time dimension.
Args:
key: random key.
audio: input audio of shape [batch_size, time_steps, ...].
max_amount: max steps of time jittering.
pad_mode: the mode of `jax.numpy.pad` method. Used to define the values of
the padded part.
Returns:
Augmented audio.
"""
time_jitter_fn = functools.partial(
_jitter, max_amount=max_amount, axis=0, pad_mode=pad_mode)
return jax.vmap(time_jitter_fn)(
key=jax.random.split(key, num=audio.shape[0]), audio=audio)
def _drop_stripes(
key: chex.PRNGKey,
x: chex.Array,
max_stripe_length: int,
num_stripes: int,
axis: int) -> chex.Array:
"""Randomly masks stripes along the `axis` dimension.
For example, below shows stripes along `axis=1`, with `max_stripe_length=4`
and `num_stripes=1`:
[[1, 1, 0, 0, 0, 1, 1, 1, 1,],
[1, 1, 1, 0, 1, 1, 1, 1, 1,],
[1, 1, 1, 1, 1, 1, 0, 0, 1,],
[0, 0, 0, 0, 1, 1, 1, 1, 1,]]
Args:
key: random key.
x: input to be augmented, whose leading dimension is the batch dimension.
max_stripe_length: the length of each mask stripe.
num_stripes: the number of stripes.
axis: the axis along which masks will be applied.
Returns:
Augmented x.
"""
batch_size = x.shape[0]
max_length = x.shape[axis]
num_dims = len(x.shape)
def _mask(key: chex.PRNGKey) -> chex.Array:
key1, key2 = jax.random.split(key)
shape = [batch_size] + [1] * (num_dims - 1)
stripe_length = jax.random.randint(
key1, shape=shape, minval=0, maxval=max_stripe_length)
start = jax.random.randint(
key2, shape=shape, minval=0, maxval=max_length - stripe_length)
mask_shape = [1] * num_dims
mask_shape[axis] = max_length
mask = jnp.repeat(
jnp.arange(max_length).reshape(mask_shape), batch_size, axis=0)
return jnp.logical_not((mask > start) * (mask < start + stripe_length))
for _ in range(num_stripes):
key, subkey = jax.random.split(key)
x *= _mask(subkey)
return x
def _jitter(
key: chex.PRNGKey,
audio: chex.Array,
max_amount: int,
axis: int,
pad_mode: Optional[str] = 'constant') -> chex.Array:
"""Randomly jitters the `audio` along the `axis` dimension.
Args:
key: random key.
audio: input audio. If the leading dim is batch, it requires a vmap/pmap
for this method to work.
max_amount: max steps of the jitter along the `axis`.
axis: the dimension of `audio` to be jittered.
pad_mode: the mode of `jax.numpy.pad` method. Used to define the values of
the padded part.
Returns:
Augmented audio.
"""
num_dims = len(audio.shape)
pad_shape = [[0, 0]] * num_dims
pad_shape[axis] = [max_amount, max_amount]
padded_audio = jnp.pad(audio, pad_shape, mode=pad_mode)
offset = jax.random.randint(key, shape=(), minval=0, maxval=2*max_amount - 1)
start = [0] * num_dims
start[axis] = offset
return jax.lax.dynamic_slice(padded_audio, start, audio.shape)
def _gaussian_kernel(sigma: float) -> chex.Array:
"""Gaussian kernel."""
radius = int(4 * sigma + 0.5) # Heuristic taken from scipy.
x = jnp.arange(-radius, radius+1)
phi_x = jnp.exp(-0.5 / (sigma ** 2) * (x ** 2))
return phi_x / phi_x.sum()
| dm_aux-main | dm_aux/augment.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio spectral transformations."""
import enum
from typing import Optional, Union, Tuple
import chex
import jax
from jax import lax
import jax.numpy as jnp
import librosa
import numpy as np
import scipy
from scipy import signal as sp_signal
class Pad(enum.Enum):
NONE = 0
START = 1
END = 2
BOTH = 3
ALIGNED = 4
def stft(signal: chex.Array,
n_fft: int = 2048,
frame_length: Optional[int] = None,
frame_step: Optional[int] = None,
window_fn: Optional[Union[str, float, Tuple[str, float]]] = 'hann',
pad: Pad = Pad.END,
pad_mode: str = 'constant',
precision: Optional[jax.lax.Precision] = None,
) -> chex.Array:
"""Computes the Short-time Fourier Transform (STFT) of the signal.
https://en.wikipedia.org/wiki/Short-time_Fourier_transform
This function converts the time domain signal to the time-frequency domain by
computing discrete Fourier transformations (DFT) over frames of length
`frame_length` and stride `frame_step`. A window function can be applied to
remove discontinuities at the edges of the segment.
This function can be run on both CPUs and hardware accelerators. This
implementation leverages jax.lax.conv to extract the frames and compute FFTs,
as opposed to Librosa which does it entry-wise. In this way significant
speedup can be achieved on hardware accelerators.
Args:
signal: input signal of shape [batch_size, signal_len].
    n_fft: length of the signal windows. Should be equal to or bigger than
      `frame_length`. If it is bigger than `frame_length` the window will be
padded with zeros.
frame_length: the size of each signal frame. If unspecified it defaults to
be equal to `n_fft`.
frame_step: the hop size of extracting signal frames. If unspecified it
defaults to be equal to `int(frame_length // 2)`.
window_fn: applied to each frame to remove the discontinuities at the edge
of the frame introduced by segmentation. It is passed to
      `scipy.signal.get_window` - see the original Scipy doc for more details
(docs.scipy.org/doc/scipy-1.7.1/reference/generated/scipy.signal
.get_window.html).
pad: pad the signal at the end(s) by `int(n_fft // 2)`. Can either be
`Pad.NONE`, `Pad.START`, `Pad.END`, `Pad.BOTH`, `Pad.ALIGNED`.
pad_mode: the mode of padding of the signal when `pad` is not None. It is a
string or `None` passed to `jax.numpy.pad`.
precision: precision of the convolution. Either `None`, which means the
default precision for the backend, a `lax.Precision` enum value
(`Precision.DEFAULT`, `Precision.HIGH` or `Precision.HIGHEST`) or a tuple
of two `lax.Precision` enums indicating precision of `lhs` and `rhs`. For
more details see the doc of `lax.conv`.
Returns:
    The time-frequency representation of the signal of shape
    `[batch_size, num_frames, n_fft // 2 + 1]`, in which `num_frames` is
    computed from the length of the signal and `frame_step`.
"""
signal_length = signal.shape[1]
if len(signal.shape) != 2:
raise ValueError('Input signal should be 2-dimensional.')
if frame_length is None:
frame_length = n_fft
if frame_step is None:
frame_step = int(frame_length // 2)
# Add the input channel dimension.
signal = signal[:, :, jnp.newaxis]
# Get the window function.
fft_window = sp_signal.get_window(window_fn, frame_length, fftbins=True)
# Pad the window to length n_fft with zeros.
if frame_length < n_fft:
left_pad = int((n_fft - frame_length) // 2)
right_pad = n_fft - frame_length - left_pad
fft_window = np.pad(fft_window, (left_pad, right_pad), mode='constant')
# Make it broadcastable.
fft_window = fft_window[:, jnp.newaxis]
# Pad the signal if needed.
if pad != Pad.NONE:
if pad == Pad.START:
pad_shape = (n_fft // 2, 0) # for istft reconstruction
elif pad == Pad.END:
pad_shape = (0, n_fft - 1) # to mimic pad_end mode of tf.signal.stft
elif pad == Pad.BOTH:
pad_shape = (n_fft // 2, n_fft // 2) # for istft reconstruction
elif pad == Pad.ALIGNED:
# Pad signal symmetrically so we obtain aligned frames.
assert signal_length % frame_step == 0
assert frame_length % frame_step == 0
padding = (frame_length - frame_step) // 2
pad_shape = (padding, padding)
else:
raise ValueError(
          f'Padding should be NONE, START, END, BOTH, or ALIGNED, got {pad}.')
signal = jnp.pad(signal, pad_width=((0, 0), pad_shape, (0, 0)),
mode=pad_mode)
elif signal_length < n_fft:
raise ValueError(
f'n_fft of {n_fft} is bigger than signal of length {signal_length}')
  # Extract frames and compute FFTs using convolution.
ch_out = n_fft // 2 + 1
# w_shape: (kernel_shape, ch_in, ch_out)
w = (_dft_matrix_np(n_fft)[:, :ch_out] * fft_window)[:, jnp.newaxis, :]
real = lax.conv_general_dilated(
signal, jnp.real(w), window_strides=[frame_step], padding='VALID',
precision=precision, dimension_numbers=('NHC', 'HIO', 'NHC'))
imag = lax.conv_general_dilated(
signal, jnp.imag(w), window_strides=[frame_step], padding='VALID',
precision=precision, dimension_numbers=('NHC', 'HIO', 'NHC'))
return real + 1j * imag
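# NOTE: illustrative sketch, not part of the original module. It shows a basic
# call to `stft` on a batch containing a single sinusoid; the n_fft, frame_step
# and padding choices are arbitrary assumptions.
def _stft_example() -> chex.Array:
  """Computes the STFT of a [1, 8000] sinusoid; output is [1, frames, 257]."""
  signal = jnp.sin(jnp.linspace(0., 100., 8000))[jnp.newaxis, :]
  return stft(signal, n_fft=512, frame_step=256, pad=Pad.BOTH)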
def istft(stft_matrix: chex.Array,
frame_length: Optional[int] = None,
frame_step: Optional[int] = None,
window_fn: Optional[Union[str, float, Tuple[str, float]]] = 'hann',
pad: Pad = Pad.END,
length: Optional[int] = None,
precision: Optional[jax.lax.Precision] = None) -> chex.Array:
"""Computes the inverse Short-time Fourier Transform (iSTFT) of the signal.
https://en.wikipedia.org/wiki/Short-time_Fourier_transform#Inverse_STFT
It converts the time-frequency domain complex signal back to the time domain.
  This implementation leverages jax.lax.conv which makes it possible to run on
  hardware accelerators.
Args:
stft_matrix: input complex matrix of shape [batch_size, num_frames,
n_fft // 2 + 1].
frame_length: the size of each signal frame. If unspecified it defaults to
be equal to `n_fft`.
frame_step: the hop size of extracting signal frames. If unspecified it
defaults to be equal to `int(frame_length // 2)`.
window_fn: applied to each frame to remove the discontinuities at the edge
of the frame introduced by segmentation. It is passed to
      `scipy.signal.get_window` - see the original Scipy doc for more details
(docs.scipy.org/doc/scipy-1.7.1/reference/generated/scipy.signal
.get_window.html).
    pad: the padding mode used when computing the STFT of the signal. Only
      `Pad.START` and `Pad.BOTH` are supported for reconstruction.
length: the trim length of the time domain signal to output.
precision: precision of the convolution. Either `None`, which means the
default precision for the backend, a `lax.Precision` enum value
(`Precision.DEFAULT`, `Precision.HIGH` or `Precision.HIGHEST`) or a tuple
of two `lax.Precision` enums indicating precision of `lhs` and `rhs`. For
more details see the doc of `lax.conv`.
Returns:
    The reconstructed time domain signal of shape
    `[batch_size, signal_length]`.
"""
n_fft = 2 * (stft_matrix.shape[-1] - 1)
num_frames = stft_matrix.shape[1]
if frame_length is None:
frame_length = n_fft
if frame_step is None:
frame_step = int(frame_length // 2)
# Get the window function.
ifft_window = scipy.signal.get_window(window_fn, frame_length, fftbins=True)
# Pad the window to length n_fft with zeros.
if frame_length < n_fft:
left_pad = int((n_fft - frame_length) // 2)
right_pad = n_fft - frame_length - left_pad
ifft_window = np.pad(ifft_window, (left_pad, right_pad), mode='constant')
stft_real = jnp.real(stft_matrix)
stft_imag = jnp.imag(stft_matrix)
# Get full stft matrix: (batch_size, num_frames, n_fft // 2 + 1) -->
# (batch_size, num_frames, n_fft)
full_stft_real = jnp.concatenate(
[stft_real, jnp.flip(stft_real[:, :, 1:-1], axis=2)], axis=2)
full_stft_imag = jnp.concatenate(
[stft_imag, -jnp.flip(stft_imag[:, :, 1:-1], axis=2)], axis=2)
# w_shape: (kernel_shape, n_fft, n_fft)
w = _dft_matrix_np(n_fft, inverse=True) / n_fft
w = (w * ifft_window[jnp.newaxis, :])[jnp.newaxis, :, :]
# Calculate IDFT frame by frame.
real = lax.conv_general_dilated(
full_stft_real, jnp.real(w), window_strides=[1], padding='VALID',
precision=precision, dimension_numbers=('NHC', 'HIO', 'NHC'))
imag = lax.conv_general_dilated(
full_stft_imag, jnp.imag(w), window_strides=[1], padding='VALID',
precision=precision, dimension_numbers=('NHC', 'HIO', 'NHC'))
signal = real - imag # (batch_size, num_frames, n_fft)
# Overlap add signals in frames to reconstruct signals.
w_add = jnp.flip(jnp.eye(n_fft), axis=1)[..., jnp.newaxis]
signal = lax.conv_transpose(
signal, w_add, strides=[frame_step], padding='VALID', precision=precision,
dimension_numbers=('NHC', 'HIO', 'NHC'))
signal = jnp.squeeze(signal, axis=-1)
ifft_window_sum = librosa.filters.window_sumsquare(
window_fn,
num_frames,
win_length=frame_length,
n_fft=n_fft,
hop_length=frame_step,
dtype=np.float64,
)
ifft_window_sum = lax.clamp(1e-11, ifft_window_sum, np.inf)
signal /= ifft_window_sum[np.newaxis]
# Un-pad the signal if needed.
if pad in [Pad.START, Pad.BOTH]:
    # For now it only reconstructs the signal from an STFT computed with
    # 'start' or 'both' padding mode.
if pad == Pad.START:
start, end = n_fft // 2, None
elif pad == Pad.BOTH:
start, end = n_fft // 2, -n_fft // 2
signal = signal[:, start:end]
else:
    raise ValueError(f'Padding should be either START or BOTH, got {pad}.')
if length is not None and signal.shape[1] > length:
signal = signal[:, :length]
return signal
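# NOTE: illustrative sketch, not part of the original module. With `Pad.BOTH`
# and the default hann window, inverting the STFT should approximately recover
# the original signal (up to edge effects); the signal and frame sizes below
# are arbitrary assumptions.
def _stft_istft_round_trip_example() -> chex.Array:
  """Reconstructs a signal from its STFT and returns the reconstruction."""
  signal = jnp.sin(jnp.linspace(0., 100., 8000))[jnp.newaxis, :]
  spec = stft(signal, n_fft=512, frame_step=256, pad=Pad.BOTH)
  return istft(spec, frame_step=256, pad=Pad.BOTH, length=signal.shape[1])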
def spectrogram(
waveform: chex.Array,
power: float = 1.0,
frame_length: Optional[int] = 2048,
frame_step: Optional[int] = None,
num_features: Optional[int] = None,
window_fn: Optional[Union[str, float, Tuple[str, float]]] = 'hann',
pad: Pad = Pad.END,
pad_mode: str = 'constant',
precision: Optional[jax.lax.Precision] = None,
) -> chex.Array:
"""Computes audio spectrograms.
https://en.wikipedia.org/wiki/Spectrogram
Args:
    waveform: Input waveform signal of shape `[batch_size, sequence_length]`.
power: The exponent for the magnitude spectrogram (e.g., 1 for energy and
2 for power).
frame_length: The length of each spectrogram frame.
frame_step: The stride of spectrogram frames.
num_features: The number of spectrogram features.
window_fn: applied to each frame to remove the discontinuities at the edge
of the frame introduced by segmentation. It is passed to
      `scipy.signal.get_window` - see the original Scipy doc for more details
(docs.scipy.org/doc/scipy-1.7.1/reference/generated/scipy.signal
.get_window.html).
pad: pad the signal at the end(s) by `int(n_fft // 2)`. Can either be
`Pad.NONE`, `Pad.START`, `Pad.END`, `Pad.BOTH`, `Pad.ALIGNED`.
pad_mode: the mode of padding of the signal when `pad` is not None. It is a
string or `None` passed to `jax.numpy.pad`.
precision: precision of the convolution. Either `None`, which means the
default precision for the backend, a `lax.Precision` enum value
(`Precision.DEFAULT`, `Precision.HIGH` or `Precision.HIGHEST`) or a tuple
of two `lax.Precision` enums indicating precision of `lhs` and `rhs`. For
more details see the doc of `lax.conv`.
Returns:
The extracted spectrograms.
"""
stfts = stft(
signal=waveform,
n_fft=frame_length,
frame_length=frame_length,
frame_step=frame_step,
window_fn=window_fn,
pad=pad,
pad_mode=pad_mode,
precision=precision)
spectrograms = jnp.power(jnp.abs(stfts), power)
return spectrograms[..., :num_features]
def mel_spectrogram(
spectrograms: chex.Array,
log_scale: bool = True,
sample_rate: int = 16000,
frame_length: Optional[int] = 2048,
num_features: int = 64,
lower_edge_hertz: float = 80.0,
upper_edge_hertz: Optional[float] = 7600.0,
) -> chex.Array:
"""Converts the spectrograms to Mel-scale.
https://en.wikipedia.org/wiki/Mel_scale
Args:
spectrograms: Input spectrograms of shape [batch_size, time_steps,
num_features].
    log_scale: Whether to return the mel spectrograms in the log scale.
sample_rate: The sample rate of the input audio.
frame_length: The length of each spectrogram frame.
num_features: The number of mel spectrogram features.
    lower_edge_hertz: Lowest frequency to consider to generate mel filterbanks.
    upper_edge_hertz: Highest frequency to consider to generate mel
      filterbanks. If None, use `sample_rate / 2.0`.
Returns:
Converted spectrograms in (log) Mel-scale.
"""
# This setup mimics tf.signal.linear_to_mel_weight_matrix.
linear_to_mel_weight_matrix = librosa.filters.mel(
sr=sample_rate, n_fft=frame_length, n_mels=num_features,
fmin=lower_edge_hertz, fmax=upper_edge_hertz, htk=True, norm=None).T
spectrograms = jnp.matmul(spectrograms, linear_to_mel_weight_matrix)
if log_scale:
spectrograms = jnp.log(spectrograms + 1e-6)
return spectrograms
def mfcc(
log_mel_spectrograms: chex.Array,
num_mfcc_features: int = 13,
) -> chex.Array:
"""Converts the log-Mel spectrograms to MFCCs.
https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
Args:
log_mel_spectrograms: Log-Mel-scale spectrograms of shape [batch_size,
time_steps, num_features].
num_mfcc_features: Number of MFCC features.
Returns:
MFCCs converted from `log_mel_spectrograms`.
"""
num_mel_bins = log_mel_spectrograms.shape[-1]
# This setup mimics tf.signal.mfccs_from_log_mel_spectrograms.
log_mel_spectrograms = jax.scipy.fft.dct(
log_mel_spectrograms, type=2, norm=None)
log_mel_spectrograms /= jnp.sqrt(2.0 * num_mel_bins)
return log_mel_spectrograms[..., :num_mfcc_features]
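# NOTE: illustrative sketch, not part of the original module. It chains
# `spectrogram` -> `mel_spectrogram` -> `mfcc` into the usual audio feature
# pipeline; the sample rate, frame sizes and feature counts are arbitrary
# assumptions.
def _mfcc_pipeline_example() -> chex.Array:
  """Computes 13 MFCC features from a one-second 16 kHz waveform."""
  waveform = jnp.sin(jnp.linspace(0., 100., 16000))[jnp.newaxis, :]
  spec = spectrogram(waveform, power=2.0, frame_length=2048, frame_step=512)
  log_mel = mel_spectrogram(spec, log_scale=True, sample_rate=16000,
                            frame_length=2048, num_features=64)
  return mfcc(log_mel, num_mfcc_features=13)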
def phase_vocoder(
stft_matrix: chex.Array,
rate: float,
hop_length: Optional[int] = None) -> chex.Array:
"""Speeds up in time by `rate` without changing the pitch.
https://en.wikipedia.org/wiki/Phase_vocoder
Args:
stft_matrix: the time-frequency representation of the signal of shape
`[batch_size, num_steps, num_freqs]`. Should be in complex.
rate: speed change factor. Faster if `rate > 1`.
    hop_length: the hop size of extracting signal frames. If None it defaults
      to `n_fft // 2`, where `n_fft = 2 * (num_freqs - 1)`.
Returns:
    Stretched STFT matrix whose number of time steps is changed to
    `ceil(num_steps / rate)`.
"""
if rate == 1.0:
return stft_matrix
num_dims = len(stft_matrix.shape)
num_steps = stft_matrix.shape[1]
num_freqs = stft_matrix.shape[2]
n_fft = 2 * (num_freqs - 1)
if hop_length is None:
hop_length = int(n_fft // 2)
  # Expected phase advance in each bin
phase_advance = jnp.linspace(0., jnp.pi * hop_length, num_freqs)
phase_advance = jnp.reshape(phase_advance, [1, 1, num_freqs])
# Create new time steps
time_steps = jnp.arange(0, num_steps, rate)
# Weighting for linear magnitude interpolation
alphas = jnp.mod(time_steps, 1.0)
shape = [1] * num_dims
shape[1] = len(time_steps)
alphas = alphas.reshape(shape)
# Pad the time dimension to simplify boundary logic
pad_shape = [(0, 0)] * num_dims
pad_shape[1] = (0, 2)
stft_matrix = jnp.pad(stft_matrix, pad_shape, 'constant')
stft_matrix_0 = stft_matrix[:, jnp.int32(time_steps), :]
stft_matrix_1 = stft_matrix[:, jnp.int32(time_steps + 1), :]
mag = (1. - alphas) * jnp.abs(stft_matrix_0) + alphas * jnp.abs(stft_matrix_1)
# Phase accumulator
phase_0 = jnp.angle(stft_matrix[:, :1, :])
# Compute phase advance
phase = jnp.angle(stft_matrix_1) - jnp.angle(stft_matrix_0) - phase_advance
# Wrap to -pi:pi range
phase -= 2.0 * jnp.pi * jnp.round(phase / (2.0 * jnp.pi))
# Accumulate phase
phase += phase_advance
phase = jnp.concatenate([phase_0, phase[:, :-1, :]], axis=1)
phase_acc = jnp.cumsum(phase, axis=1)
return mag * jnp.exp(1.0j * phase_acc)
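# NOTE: illustrative sketch, not part of the original module. It speeds up a
# signal by 20% without changing the pitch by stretching its STFT with
# `phase_vocoder` and inverting the result; all sizes are arbitrary
# assumptions.
def _phase_vocoder_example() -> chex.Array:
  """Returns a time-compressed version of a [1, 8000] sinusoid."""
  signal = jnp.sin(jnp.linspace(0., 100., 8000))[jnp.newaxis, :]
  spec = stft(signal, n_fft=512, frame_step=256, pad=Pad.BOTH)
  stretched = phase_vocoder(spec, rate=1.2, hop_length=256)
  return istft(stretched, frame_step=256, pad=Pad.BOTH)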
def resample(x: chex.Array,
num: int,
axis: int = 0,
window: Optional[str] = None,
domain: str = 'time') -> chex.Array:
"""Resamples `x` using Fourier transforms to `num` samples along the `axis`.
This implementation follows `scipy.signal.resample` but is jittable.
Args:
x: Input signal to be resampled.
num: The number of samples in the resampled signal.
axis: The axis of `x` to be resampled. Default to 0.
window: The window function applied to the spectral domain signal. Available
windows are from `scipy.signal.get_window`.
    domain: If 'time', then `x` is considered as in the time domain. If 'freq',
it is a frequency domain signal.
Returns:
Resampled version of `x`.
Raises:
ValueError: if domain is not one of {'time', 'freq'}.
"""
if domain not in ['time', 'freq']:
raise ValueError(f'Domain {domain} is not one of time and freq.')
length = x.shape[axis]
x_is_real = jnp.isrealobj(x)
if domain == 'time':
if x_is_real:
x_spec = jnp.fft.rfft(x, axis=axis)
else:
x_spec = jnp.fft.fft(x, axis=axis)
else:
x_spec = x
# Apply window to the spectrum
if window is not None:
spec_window = scipy.fftpack.ifftshift(sp_signal.get_window(window, length))
newshape_spec_window = [1] * x.ndim
newshape_spec_window[axis] = x_spec.shape[axis]
if x_is_real:
# Fold the window back on itself to mimic complex behavior
spec_window_real = spec_window.copy()
spec_window_real[1:] += spec_window_real[-1:0:-1]
spec_window_real[1:] *= 0.5
x_spec *= spec_window_real[:newshape_spec_window[axis]].reshape(
newshape_spec_window)
else:
x_spec *= spec_window.reshape(newshape_spec_window)
# Copy each half of the original spectrum to the output spectrum, either
  # truncating high frequencies (downsampling) or zero-padding them
# (upsampling)
# Placeholder array for output spectrum
newshape = list(x.shape)
newshape[axis] = num // 2 + 1 if x_is_real else num
y_spec = jnp.zeros(newshape, x_spec.dtype)
# Copy positive frequency components (and Nyquist, if present)
n = min(num, length)
nyq = n // 2 + 1 # Slice index that includes Nyquist if present
sl = [slice(None)] * x.ndim
sl[axis] = slice(0, nyq)
y_spec = y_spec.at[tuple(sl)].set(x_spec[tuple(sl)])
if not x_is_real:
# Copy negative frequency components
if n > 2: # (slice expression doesn't collapse to empty array)
sl[axis] = slice(nyq - n, None)
y_spec = y_spec.at[tuple(sl)].set(x_spec[tuple(sl)])
# Split/join Nyquist component(s) if present
# So far we have set y_spec[+n/2]=x_spec[+n/2]
if n % 2 == 0:
if num < length: # downsampling
if x_is_real:
sl[axis] = slice(n//2, n//2 + 1)
y_spec = y_spec.at[tuple(sl)].set(y_spec[tuple(sl)] * 2.)
else:
# select the component of Y at frequency +N/2,
# add the component of X at -N/2
sl[axis] = slice(-n//2, -n//2 + 1)
y_spec = y_spec.at[tuple(sl)].add(x_spec[tuple(sl)])
elif length < num: # upsampling
# select the component at frequency +n/2 and halve it
sl[axis] = slice(n//2, n//2 + 1)
y_spec = y_spec.at[tuple(sl)].set(y_spec[tuple(sl)] * 0.5)
if not x_is_real:
temp = y_spec[tuple(sl)]
# set the component at -N/2 equal to the component at +N/2
sl[axis] = slice(num-n//2, num-n//2 + 1)
y_spec = y_spec.at[tuple(sl)].set(temp)
# Inverse transform
if x_is_real:
y = jnp.fft.irfft(y_spec, axis=axis) # specifying num is not implemented.
else:
y = jnp.fft.ifft(y_spec, axis=axis)
assert y.shape[axis] == num
y *= (float(num) / float(length))
return y
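# NOTE: illustrative sketch, not part of the original module. It downsamples a
# 16 kHz signal to 8 kHz along the time axis with the Fourier-domain
# `resample`; the sample counts are arbitrary assumptions.
def _resample_example() -> chex.Array:
  """Halves the number of samples of a [1, 16000] sinusoid."""
  x = jnp.sin(jnp.linspace(0., 100., 16000))[jnp.newaxis, :]
  return resample(x, num=8000, axis=1)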
def _dft_matrix_np(
n_points: int,
inverse: bool = False,
dtype: np.dtype = np.complex128) -> np.ndarray:
"""Constructs a discrete Fourier transform (DFT) transformation matrix.
https://en.wikipedia.org/wiki/Discrete_Fourier_transform
Args:
n_points: number of DFT points.
inverse: whether to compute the inverse DFT matrix.
dtype: the data type of the output.
Returns:
The DFT matrix of the shape [n_points, n_points].
"""
x, y = np.meshgrid(np.arange(n_points), np.arange(n_points))
if inverse:
omega = np.exp(2.0 * np.pi * 1j / n_points)
else:
omega = np.exp(-2.0 * np.pi * 1j / n_points)
return np.power(omega, x * y).astype(dtype)
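# NOTE: illustrative sketch, not part of the original module. It checks that
# right-multiplying a vector by `_dft_matrix_np(n)` matches `np.fft.fft`, which
# is the property the convolution-based STFT above relies on.
def _dft_matrix_example() -> bool:
  """Returns True if the DFT matrix agrees with numpy's FFT."""
  v = np.random.RandomState(0).randn(8)
  return bool(np.allclose(v @ _dft_matrix_np(8), np.fft.fft(v)))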
| dm_aux-main | dm_aux/spectral.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| dm_aux-main | dm_aux/opensource/src_init.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ccbo init."""
| ccbo-main | __init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
from setuptools import find_packages
from setuptools import setup
setup(
name='ccbo',
version='1.0',
description=(
'Code for the paper: Aglietti, Malek, Ktena, Chiappa. "Constrained'
' Causal Bayesian Optimization" ICML 2023.'
),
author='DeepMind',
author_email='[email protected]',
license='Apache License, Version 2.0',
url='https://github.com/deepmind/ccbo',
packages=find_packages(),
install_requires=[
'absl-py',
'emukit',
'GPy',
'graphviz',
'matplotlib',
'numpy',
'networkx',
'paramz',
'pygraphviz',
'scipy',
'scikit-learn',
'typing_extensions',
'ml_collections'
],
tests_require=['mock'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| ccbo-main | setup.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the kernel for a multi-task model with coavriance structure induced by the causal graph."""
import functools
from typing import Any, Dict, Optional, Callable, List, Set, Tuple
from GPy import kern
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import gp_utils
class CausalCoregionalize(kern.Kern):
"""Covariance function for causal multi-task model.
input_dim: input dimensionality.
target_list: list of target variables e.g. Y and the constrained variables.
graph: DAG.
target_variable: name of target variable.
exploration_set: intervened variables.
fitted_scm: estimated SCM object.
true_scm_funcs: true functions in the SCM.
dict_mean_product: dictionary of stored values for the mean of the product
of the target causal effects computed at different intervention levels.
dict_constraints_mean_product: dictionary of stored values for the mean of
the product of the constraint causal effects computed at different
intervention levels.
dict_product_mean: dictionary of stored values for the product of the mean
causal effect on the target variable.
dict_constraints_product_mean: dictionary of stored values for the product
of the mean causal effects on the constrained variables.
active_dims: active dimensions of inputs X we will work on. When the
      dimensionality of an input X is D this parameter determines if all D
dimensions should be used to compute the kernel. This is the case when
active_dims = None. When instead active_dims is not None and, for instance,
we have active_dims=0, the kernel will be computed with only the first
column that is x[:,0].
seed: random seed used to sample from the estimated SCM.
    n_samples: number of samples for the Monte Carlo estimates.
    use_true_scm: whether to use the true SCM functions rather than the fitted
      ones to compute the causal effects.
    name: name of the kernel.
"""
def __init__(
self,
input_dim: int,
target_list: List[str],
graph: multidigraph.MultiDiGraph,
target_variable: str,
exploration_set: Set[str],
fitted_scm: Optional[Callable[[], Any]],
true_scm_funcs: Optional[Callable[[], Any]],
dict_mean_product: Dict[Tuple[str, ...], Any],
dict_constraints_mean_product: Dict[Tuple[str, ...], Dict[str, Any]],
dict_product_mean: Dict[Tuple[str, ...], Any],
dict_constraints_product_mean: Dict[Tuple[str, ...], Dict[str, Any]],
active_dims: Optional[int] = None,
seed: int = 1,
n_samples: int = 10,
use_true_scm: bool = False,
name: str = "causal_coregionalize"):
args = {"input_dim": input_dim, "active_dims": active_dims, "name": name}
super().__init__(**args)
self.graph = graph
self.exploration_set = exploration_set
# Initializing the function to compute the product of the mean or the mean
# of the product of the causal effects by sampling from the SCM to get
# a Monte Carlo estimate.
get_product_mean_functions = functools.partial(
gp_utils.get_product_mean_functions,
graph=graph,
target_variable=target_variable,
target_list=target_list,
exploration_set=exploration_set,
fitted_scm=fitted_scm,
true_scm_funcs=true_scm_funcs,
n_samples=n_samples,
use_true_scm=use_true_scm)
# When computing the mean of the product we first multiply the causal
# effects on the individual variables and then take the average. With
# compute_moments=False we avoid computing the moments inside the sampling
# function and do it only after having multiplied the samples
self.mean_product = get_product_mean_functions(
compute_moments=False, seeds=[seed, seed],
mean_dict_store=dict_mean_product,
mean_constraints_dict_store=dict_constraints_mean_product
)
# When computing the product of the mean we can take the moment of the
# samples inside the sampling function (compute_moments=True) and multiply
# those afterwards
self.product_mean = get_product_mean_functions(
compute_moments=True, seeds=[seed, seed],
mean_dict_store=dict_product_mean,
mean_constraints_dict_store=dict_constraints_product_mean
)
def K(self, x: np.ndarray, xprime: Optional[np.ndarray] = None) -> np.ndarray:
    # The kernel is computed as E[mu_X mu_X2] - E[mu_X]E[mu_X2] where mu is the
    # causal effect of the exploration set on X and X2. These are the variables
    # corresponding to the values of the last dimension of the inputs x and
    # xprime.
if xprime is None:
xprime = x
return self.mean_product(x, xprime) - self.product_mean(x, xprime)
def Kdiag(self, x: np.ndarray) -> np.ndarray:
return (self.mean_product(x, None) - self.product_mean(x, None))[:, 0]
def to_dict(self) -> Dict[str, Any]:
input_dict = self._save_to_input_dict()
input_dict["class"] = "GPy.kern.CausalCoregionalize"
return input_dict
def gradients_X(self, *unused_args):
pass
def gradients_X_diag(self, *unused_args):
pass
def update_gradients_full(self, *unused_args):
pass
def update_gradients_diag(self, *unused_args):
pass
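# NOTE: illustrative sketch, not part of the original module. It spells out, on
# plain NumPy arrays, the covariance that `CausalCoregionalize.K` approximates
# via Monte Carlo: the mean of the product of the sampled causal effects minus
# the product of their means. The sample arrays below are hypothetical inputs.
def _toy_causal_covariance(mu_samples_x: np.ndarray,
                           mu_samples_x2: np.ndarray) -> np.ndarray:
  """Covariance from effect samples of shape [n_samples, n_points]."""
  mean_product = np.mean(
      mu_samples_x[:, :, None] * mu_samples_x2[:, None, :], axis=0)
  product_mean = np.outer(
      np.mean(mu_samples_x, axis=0), np.mean(mu_samples_x2, axis=0))
  return mean_product - product_mean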
| ccbo-main | kernels/causal_coregionalize_kernel.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the causal kernel as an instance of a stationary kernel."""
from typing import Callable, Optional
import GPy
from GPy.kern.src import psi_comp
from GPy.kern.src import stationary
import numpy as np
from paramz import transformations
class CausalRBF(stationary.Stationary):
"""Implement the causal RBF kernel.
For details about the kernel see:
Aglietti, V., Lu, X., Paleyes, A., & González, J. (2020, June). Causal
bayesian optimization. In International Conference on Artificial
Intelligence and Statistics (pp. 3155-3164). PMLR.
"""
def __init__(
self,
input_dim: int,
variance_adjustment: Callable[[np.ndarray], np.ndarray],
variance: float = 1.0,
lengthscale: Optional[float] = None,
rescale_variance: Optional[float] = 1.0,
ard: Optional[bool] = False,
active_dims: Optional[int] = None,
name: str = "rbf",
usegpu: Optional[bool] = False,
inv_l: Optional[bool] = False,
):
super().__init__(input_dim, variance, lengthscale, ard, active_dims, name,
useGPU=usegpu)
self.usegpu = usegpu
if self.usegpu:
self.psicomp = psi_comp.PSICOMP_RBF_GPU()
else:
self.psicomp = psi_comp.PSICOMP_RBF()
self.use_invengthscale = inv_l
if inv_l:
self.unlink_parameter(self.lengthscale)
self.inv_l = GPy.core.Param("inv_lengthscale", 1.0 / self.lengthscale**2,
transformations.Logexp())
self.link_parameter(self.inv_l)
self.variance_adjustment = variance_adjustment
self.rescale_variance = GPy.core.Param("rescale_variance", rescale_variance,
transformations.Logexp())
def K(self, x: np.ndarray, x2: Optional[np.ndarray] = None) -> np.ndarray:
"""Kernel function applied on inputs x and x2."""
if x2 is None:
x2 = x
r = self._scaled_dist(x, x2)
values = self.variance * np.exp(-0.5 * r ** 2)
value_diagonal_x = self.variance_adjustment(x)
value_diagonal_x2 = self.variance_adjustment(x2)
additional_matrix = np.dot(
np.sqrt(value_diagonal_x), np.sqrt(np.transpose(value_diagonal_x2)))
assert additional_matrix.shape == values.shape, (
additional_matrix.shape,
values.shape,
)
return values + additional_matrix
def Kdiag(self, x: np.ndarray) -> np.ndarray:
# ret = np.empty(x.shape[0])
# ret[:] = np.repeat(0.1, x.shape[0])
# diagonal_terms = ret
value = self.variance_adjustment(x)
if x.shape[0] == 1 and x.shape[1] == 1:
diagonal_terms = value
else:
if np.isscalar(value):
diagonal_terms = value
else:
diagonal_terms = value[:, 0]
return self.variance + diagonal_terms
def K_of_r(self, r: float) -> float:
return self.variance * np.exp(-0.5 * r ** 2)
def dK_dr(self, r: float) -> float:
return -r * self.K_of_r(r)
def dK2_drdr(self, r: float) -> float:
return (r ** 2 - 1) * self.K_of_r(r)
def dK2_drdr_diag(self) -> float:
return -self.variance # as the diagonal of r is always filled with zeros
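# NOTE: illustrative sketch, not part of the original module. It builds a
# CausalRBF with a toy variance adjustment (a constant function standing in for
# the variance of the estimated causal effect) and evaluates it on a small
# grid. The adjustment function and grid are arbitrary assumptions.
def _causal_rbf_example() -> np.ndarray:
  """Returns the 5 x 5 covariance matrix of a toy CausalRBF."""
  variance_adjustment = lambda x: 0.1 * np.ones((x.shape[0], 1))
  kernel = CausalRBF(
      input_dim=1, variance_adjustment=variance_adjustment, variance=1.0,
      lengthscale=1.0)
  x = np.linspace(0., 1., 5)[:, None]
  return kernel.K(x)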
| ccbo-main | kernels/causal_kernel.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""kernels init."""
| ccbo-main | kernels/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define method for constrained causal Bayesian Optimization."""
from __future__ import annotations
import collections
import copy
import functools
import itertools
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple
from absl import logging
from emukit.model_wrappers import gpy_model_wrappers
from GPy import core
from GPy.core.parameterization import priors
from GPy.kern.src import rbf
from GPy.util import multioutput
from networkx.classes import multidigraph
import numpy as np
from ccbo.acquisitions import evaluate_acquisitions
from ccbo.kernels import causal_coregionalize_kernel
from ccbo.methods import cbo
from ccbo.utils import constraints_functions
from ccbo.utils import cost_utils
from ccbo.utils import gp_utils
from ccbo.utils import intervention_functions
from ccbo.utils import plotting_utils
from ccbo.utils import sampling_utils
from ccbo.utils import scm_utils
from ccbo.utils import utilities
class CCBO(cbo.CBO):
"""Constrained Causal Bayesian Optimisation class."""
def __init__(
self,
graph: multidigraph.MultiDiGraph,
scm: Any,
make_scm_estimator: Callable[
[
multidigraph.MultiDiGraph,
Dict[Tuple[Optional[Any], ...], Any],
collections.OrderedDict[str, Any],
],
Any,
],
observation_samples: Dict[str, np.ndarray],
intervention_domain: Dict[str, Sequence[float]],
intervention_samples: Optional[
Dict[Tuple[str, ...], Optional[Dict[str, Any]]]
],
exploration_sets: List[Tuple[str, ...]],
number_of_trials: int,
ground_truth: Optional[Dict[Tuple[str, ...], Any]] = None,
task: utilities.Task = utilities.Task.MIN,
n_restart: int = 1,
cost_type: int = 1,
hp_prior: bool = True,
num_anchor_points: int = 100,
seed: int = 1,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
causal_prior: bool = True,
verbose: bool = False,
constraints: Optional[Dict[str, Any]] = None,
ground_truth_constraints: Optional[Dict[str, Dict[str,
List[float]]]] = None,
multi_task_model: bool = False,
sampling_seed: Optional[int] = None,
n_kernel_samples: int = 1,
noisy_observations: bool = False,
noisy_acquisition: bool = False,
fix_likelihood_noise_var: bool = True,
n_samples_per_intervention: int = 1,
add_rbf_kernel: bool = False,
update_scm: bool = False,
use_prior_mean: bool = False,
size_intervention_grid: int = 100,
use_true_scm: bool = False):
"""Constrained Causal Bayesian Optimisation class."""
super().__init__(
graph=graph,
scm=scm,
make_scm_estimator=make_scm_estimator,
observation_samples=observation_samples,
intervention_domain=intervention_domain,
intervention_samples=intervention_samples,
exploration_sets=exploration_sets,
number_of_trials=number_of_trials,
ground_truth=ground_truth,
task=task,
n_restart=n_restart,
cost_type=cost_type,
hp_prior=hp_prior,
num_anchor_points=num_anchor_points,
seed=seed,
sample_anchor_points=sample_anchor_points,
seed_anchor_points=seed_anchor_points,
causal_prior=causal_prior,
verbose=verbose,
sampling_seed=sampling_seed,
noisy_observations=noisy_observations,
noisy_acquisition=noisy_acquisition,
n_samples_per_intervention=n_samples_per_intervention,
fix_likelihood_noise_var=fix_likelihood_noise_var,
size_intervention_grid=size_intervention_grid,
use_true_scm=use_true_scm,
use_prior_mean=use_prior_mean,
)
# Initialization specific to the constrained CBO algorithm
self.constraints = constraints
# Verify that constraints exist
assert self.constraints is not None, "cCBO requires a list of constraints"
# Initialise the list and dict for the models on the constraints
(self.constraints_dict, self.initial_feasibility, self.bo_model_constraints,
self.feasibility, self.interventional_data_y_constraints
) = constraints_functions.get_constraints_dicts(self.exploration_sets,
self.constraints,
self.graph,
self.target_variable,
self.observational_samples)
self.feasibility_noiseless = {es: [] for es in self.exploration_sets}
# Initialize mean and var functions for GPs on constraints functions
self.mean_function_constraints = copy.deepcopy(self.bo_model_constraints)
self.var_function_constraints = copy.deepcopy(self.bo_model_constraints)
# Initialize best feasible exploration sets over trials
(self.best_es_over_trials, self.best_level_over_trials
) = constraints_functions.best_feasible_initial_es(
self.initial_feasibility, self.best_initial_es, self.best_initial_level,
self.interventional_data_y, self.interventional_data_x,
utilities.EVAL_FN[self.task])
# Store values for prior mean and variance
self.mean_constraints_dict_store = {
es: {p: {} for p in self.constraints_dict[es]["vars"]
} for es in self.exploration_sets
}
self.var_constraints_dict_store = copy.deepcopy(
self.mean_constraints_dict_store)
# Initialize object to store the observed constraints values
# The interventional input values are the same for target and constraints
constraints_functions.initialise_constraints_interventional_objects(
self.exploration_sets, intervention_samples,
self.interventional_data_y_constraints, self.bo_model_constraints,
self.initial_feasibility, constraints, self.feasibility)
    # Get interventional domains for manipulative and non-manipulative vars
self.interventional_constraint_variable_limits = { # pylint: disable=g-complex-comprehension
key: scm.variables[key][1]
for key in self.all_vars
if scm.variables[key][0] in [
utilities.VariableType.NONMANIPULATIVE.value,
utilities.VariableType.MANIPULATIVE.value
]
}
# Store acquisition function values
self.improvement_dict = {es: [] for es in self.exploration_sets}
self.prob_feasibility_dict = {es: [] for es in self.exploration_sets}
self.ground_truth_constraints = ground_truth_constraints
# Whether to use a multi-task model for the target and the constraints
self.multi_task_model = multi_task_model
# Number of samples for estimating the kernel of a multi-task GP
self.n_kernel_samples = n_kernel_samples
# Add RBF kernel to the coregionalized kernel
self.add_rbf_kernel = add_rbf_kernel
# Update the SCM fitting as we collect interventional data
self.update_scm = update_scm
self.dict_mean_product = copy.deepcopy(
self.mean_dict_store)
self.dict_constraints_mean_product = copy.deepcopy(
self.mean_constraints_dict_store)
self.dict_product_mean = copy.deepcopy(
self.mean_dict_store)
self.dict_constraints_product_mean = copy.deepcopy(
self.mean_constraints_dict_store)
def run(self) -> None:
"""Run the BO loop."""
if self.verbose:
assert self.ground_truth is not None, "Provide ground truth"
assert self.ground_truth_constraints is not None, "Provide ground truth"
# Get current target and best_es
target = self.target_variable
best_es = self.best_initial_es
for it in range(self.number_of_trials):
logging.info("Trial: %s", it)
if it == 0: # First trial
self.best_es_over_trials_list.append([
best_es,
int(np.all(list(self.initial_feasibility[best_es].values())))
])
self.best_level_over_trials_list.append(self.best_initial_level)
fitted_scm = self.observation_trial(best_es, target, it)
# Update model on the constraints
self._update_sufficient_statistics_constraints(fitted_scm=fitted_scm) # pytype: disable=wrong-arg-types # dynamic-method-lookup
# When observe thus append previous feasibility values for logs
for es in self.exploration_sets:
self.feasibility[es].append(self.feasibility[es][-1])
if self.verbose:
logging.info("Current feasibility: %s", self.feasibility)
logging.info("Current optimal y values: %s",
self.optimal_outcome_values_during_trials)
else:
if it > 1 and self.update_scm:
# Refit Gaussian processes to functions in the SCM using observational
# and interventional data. These are then used to update the
# multi-task kernel when using a causal prior.
logging.warning("The functions in the SCM are refitted at each"
" iteration and the causal kernel is thus updated. "
" This might significantly slow down the algorithm!")
# Refit the functions
self.fitted_scm_fncs = scm_utils.fit_scm_fncs(
self.graph, self.observational_samples, self.scm_funcs,
self.n_restart)
# Update the simulator
fitted_scm = self.make_scm_estimator(self.graph, self.scm_fncs,
self.scm)
# Reset the dict to store the sampled mean causal effects as we don't
# want to reuse values obtained earlier with different SCM functions
self.mean_dict_store = {es: {} for es in self.exploration_sets}
self.mean_constraints_dict_store = {
es: {p: {} for p in self.constraints_dict[es]["vars"]
} for es in self.exploration_sets
}
self._intervention_trial(target, it, self.multi_task_model, fitted_scm)
# Store optimal intervention
self.optimal_intervention = {
"set": self.best_es_over_trials,
"level": self.best_level_over_trials,
"outcome": self.optimal_outcome_values_during_trials[-1],
}
def _intervention_trial(
self,
target: str,
it: int,
multi_task_model: bool = False,
fitted_scm: Optional[Callable[[], Any]] = None,
) -> None:
"""Run one intervention trial of the BO loop."""
# Update models if we have only observed so far
if self.trial_type[-1] == utilities.Trial.OBSERVATION:
if multi_task_model:
assert fitted_scm
self._update_all_models(fitted_scm=fitted_scm)
else:
self._update_all_surrogate_models()
self._update_all_surrogate_models_constraints()
# Run the actual per trial computation
self._per_trial_computations(it, target, fitted_scm)
def _update_all_models(self, fitted_scm: Callable[[], Any]) -> None:
"""Update surrogate models (GPs) on the target and the constraints."""
for es in self.exploration_sets:
# If there is data for an exploration set we want to update the models
if (self.interventional_data_x[es] is not None and
self.interventional_data_y[es] is not None):
if self.constraints_dict[es]["num"] == 0:
# If there are no constraints we just update the surrogate model
update_model_cls = functools.partial(
self._update_bo_model,
data_x=self.interventional_data_x,
data_y=self.interventional_data_y,
mean_functions=self.mean_function,
variance_functions=self.variance_function,
bo_model=self.bo_model)
else:
# Update the multi-task model
update_model_cls = functools.partial(
self._update_multi_task_model,
causal_prior=self.causal_prior,
graph=self.graph,
mean_function=self.mean_function,
mean_function_constraints=self.mean_function_constraints,
fitted_scm=fitted_scm)
update_model_cls(
exploration_set=es,
hp_prior=self.hp_prior,
fix_likelihood_noise_var=self.fix_likelihood_noise_var,
interventional_limits=self
.interventional_constraint_variable_limits,
n_samples_per_intervention=self.n_samples_per_intervention)
def _update_multi_task_model(
self,
exploration_set: Set[str],
n_samples_per_intervention: int,
causal_prior: bool = False,
graph: Optional[multidigraph.MultiDiGraph] = None,
mean_function: Optional[Dict[Set[str], Any]] = None,
mean_function_constraints: Optional[Dict[Set[str], Any]] = None,
fitted_scm: Optional[Callable[[], Any]] = None,
hp_prior: bool = True,
lengthscale: float = 1.,
variance: float = 1.,
alpha: float = 2,
beta: float = 0.5,
beta_l: float = 1.5,
n_optimization_restarts: int = 1,
verbose: bool = False,
fix_likelihood_noise_var: bool = True,
interventional_limits: Optional[Dict[str, Sequence[float]]] = None):
"""Update surrogate models (GPs) with a multi-task structure."""
input_dim = len(exploration_set)
x = self.interventional_data_x[exploration_set]
y = self.interventional_data_y[exploration_set]
y_constraints = list(
self.interventional_data_y_constraints[exploration_set].values())
# Define multi-task outputs
y_multi_task = [y] + y_constraints
# The number of outputs is given by the constraints plus the target
num_outputs = self.constraints_dict[exploration_set]["num"] + 1
assert len(y_multi_task) == num_outputs
# Define multi-task inputs
x_multi_task = [x] * num_outputs
# Define RBF kernel and put priors on hyperparameters
kernel = rbf.RBF(input_dim, lengthscale=lengthscale, variance=variance)
# If hp_prior is True, we place a prior on kernel hyperparameters of each
# function to get a MAP. This is for numerical stability issues.
if hp_prior:
gamma = priors.Gamma(a=alpha, b=beta)
kernel.variance.set_prior(gamma)
if interventional_limits:
all_vars_multitask_model = list(
exploration_set
) + self.constraints_dict[exploration_set]["vars"]
alpha_l = gp_utils.get_lenghscale_hp(all_vars_multitask_model,
interventional_limits)
gamma = priors.Gamma(a=alpha_l, b=beta_l)
kernel.lengthscale.set_prior(gamma)
# Merge all estimated mean functions
total_mean_list = [mean_function[exploration_set]] + [
*mean_function_constraints[exploration_set].values()
]
if not self.use_prior_mean:
mean_function = None
else:
# Define prior mean function
mean_function = core.Mapping(input_dim + 1, 1)
mean_function.f = gp_utils.mean_function_multitask_model(total_mean_list)
mean_function.update_gradients = lambda a, b: None
# Define a kernel for the multi-task model
if not causal_prior:
# Define an ICM type of kernel
multitask_kernel = multioutput.ICM(
input_dim=input_dim, num_outputs=num_outputs, kernel=kernel)
else:
# Use a kernel giving a correlation structure among outputs that
# reflects the DAG structure. The kernel is numerically approximated.
# active_dims is used to indicate which dimension of the inputs should
# be used in the kernel computation. Here we want to use also the task
# type that is the function index. We thus adjust the input dim so that
# the kernel internally considers it.
target_list = [self.target_variable
] + self.constraints_dict[exploration_set]["vars"]
# Notice that the input dimensionality here is given by the original
# input dimensionality plus the additional dimension given by
# the task index (or function index)
multitask_kernel = causal_coregionalize_kernel.CausalCoregionalize(
input_dim=input_dim + 1,
target_list=target_list,
graph=graph,
target_variable=self.target_variable,
exploration_set=exploration_set,
fitted_scm=fitted_scm,
true_scm_funcs=self.scm_funcs,
dict_mean_product=self.dict_mean_product,
dict_constraints_mean_product=self.dict_constraints_mean_product,
dict_product_mean=self.dict_product_mean,
dict_constraints_product_mean=self.dict_constraints_product_mean,
seed=self.sampling_seed,
n_samples=self.n_kernel_samples,
use_true_scm=self.use_true_scm)
if self.add_rbf_kernel:
# Add an RBF kernel to the multi-task kernel to increase the model
# flexibility
multitask_kernel += kernel
# Initialize the multi-task model with the defined kernel and mean function
model = gp_utils.GPCausalCoregionalizedRegression(
x_multi_task,
y_multi_task,
kernel=multitask_kernel,
mean_function=mean_function)
if fix_likelihood_noise_var:
# Fix all likelihood variances to zero
for param in model.likelihood.parameters:
param.fix(1e-5)
else:
      # Initialize the value of the likelihood variance considering the number
# of interventional samples we get from each experiment
for param in model.likelihood.parameters:
lik_noise_var = (1./n_samples_per_intervention)
param.variance = lik_noise_var
# Assign to all models for exploration_set the same multi-task GP
    # This will be used to compute the acquisition function
multi_task_model = gpy_model_wrappers.GPyMultiOutputWrapper(
gpy_model=model,
n_outputs=num_outputs,
n_optimization_restarts=n_optimization_restarts,
verbose_optimization=verbose)
# Optimize multi-task model but prevent randomization from affecting
# the optimization of the GP hyperparameters
if self.verbose:
print("Optimizing the multi task model for:", exploration_set)
print("Model BEFORE optimizing:", model)
old_seed = np.random.get_state()
np.random.seed(self.seed)
multi_task_model.optimize()
np.random.set_state(old_seed)
if self.verbose:
print("Model AFTER optimizing:", model)
self.bo_model[exploration_set] = multi_task_model
for var in self.constraints_dict[exploration_set]["vars"]:
self.bo_model_constraints[exploration_set][var] = multi_task_model
def _update_all_surrogate_models_constraints(self) -> None:
"""Update all surrogate models (GPs) on the constraints."""
for es in self.exploration_sets:
if (self.interventional_data_x[es] is not None and
self.interventional_data_y[es] is not None):
self._update_bo_model_constraints(es)
def _update_bo_model_constraints(self, es: Tuple[str, ...]) -> None:
"""Update surrogate model (GPs) on the constraints for es."""
constraints_targets_es = list(self.bo_model_constraints[es].keys())
assert set(constraints_targets_es) == set(self.constraints_dict[es]["vars"])
for p in constraints_targets_es:
self._update_bo_model(
exploration_set=p,
intervention_set=es,
data_x=self.interventional_data_x[es],
data_y=self.interventional_data_y_constraints[es],
mean_functions=self.mean_function_constraints[es],
variance_functions=self.var_function_constraints[es],
bo_model=self.bo_model_constraints[es],
hp_prior=self.hp_prior,
fix_likelihood_noise_var=self.fix_likelihood_noise_var,
interventional_limits=self.interventional_constraint_variable_limits,
n_samples_per_intervention=self.n_samples_per_intervention)
def _evaluate_acquisition_functions(self, current_best_global_target,
it: int) -> None:
"""Evaluate the acquisition function given the surrogate models."""
for es in self.exploration_sets:
if (self.interventional_data_x[es] is not None and
self.interventional_data_y[es] is not None):
# If DI, the model exists
bo_model = self.bo_model[es]
bo_model_constraints = self.bo_model_constraints[es]
else:
# If no DI, the model does not exist yet.
# We initialise the standard mean and variance function
# and use a single-task model.
bo_model = None
bo_model_constraints = None
self.mean_function[es] = utilities.standard_mean_function
self.variance_function[es] = utilities.zero_variance_adjustment
# There are constraints for es
if self.constraints_dict[es]:
for j in range(self.constraints_dict[es]["num"]):
self.mean_function_constraints[es][
j] = utilities.standard_mean_function
self.var_function_constraints[es][
j] = utilities.zero_variance_adjustment
if self.seed_anchor_points is None:
seed_to_pass = None
else:
seed_to_pass = int(self.seed_anchor_points * it)
(self.y_acquired[es], self.corresponding_x[es], improvement,
pf) = evaluate_acquisitions.evaluate_constrained_acquisition_function(
self.intervention_exploration_domain[es],
bo_model,
self.mean_function[es],
self.variance_function[es],
current_best_global_target,
es,
self.cost_functions,
self.task,
self.target_variable,
bo_model_constraints,
self.mean_function_constraints[es],
self.var_function_constraints[es],
self.constraints,
self.constraints_dict,
verbose=self.verbose,
num_anchor_points=self.num_anchor_points,
sample_anchor_points=self.sample_anchor_points,
seed_anchor_points=seed_to_pass,
multi_task_model=self.multi_task_model,
noisy_acquisition=self.noisy_acquisition)
self.improvement_dict[es].append(improvement)
self.prob_feasibility_dict[es].append(pf)
def _per_trial_computations(self, it: int, target: str,
fitted_scm: Callable[[], Any]) -> None:
"""Performs computations for each trial iteration for specific target."""
logging.info(">>>")
logging.info("Iteration: %s", it)
logging.info("<<<")
if self.verbose:
print(">>> Target model BEFORE optimization")
plotting_utils.plot_models(self.bo_model, self.exploration_sets,
self.ground_truth, self.interventional_grids,
self.interventional_data_x,
self.interventional_data_y,
self.multi_task_model)
print(">>> Constraints models BEFORE optimization")
plotting_utils.plot_models(
self.bo_model_constraints, self.exploration_sets,
self.ground_truth_constraints, self.interventional_grids,
self.interventional_data_x, self.interventional_data_y_constraints,
self.multi_task_model)
# Indicate that in this trial we are explicitly intervening in the system
self.trial_type.append(utilities.Trial.INTERVENTION)
# Get current best across intervention sets
current_best_global_target = self._get_current_feasible_global_target()
# Compute acquisition function given the updated BO models for DI.
    # Notice that we use current_best_global_target and the costs to compute it.
self._evaluate_acquisition_functions(current_best_global_target, it)
# Best exploration set based on acquired target-values
best_es = max(self.y_acquired, key=self.y_acquired.get)
new_interventional_data_x = self.corresponding_x[best_es]
self._check_new_point(best_es)
# Get the corresponding outcome values for this intervention set
y_new = self.target_functions[best_es](
target, np.squeeze(new_interventional_data_x))
if self.verbose:
logging.info("Current best global target: %s", current_best_global_target)
logging.info("All y values found: %s", self.y_acquired)
logging.info("Best es found: %s", best_es)
logging.info("Best x found: %s", new_interventional_data_x)
logging.info("Best y found: %s", y_new)
    # Get the values of the constraints (both noisy and noiseless) related to
    # the intervened variables
y_new_c = {}
feasibility_list = []
feasibility_list_noiseless = []
if self.constraints_dict[best_es]["num"] == 0:
tmp = 1
feasibility_list.append(tmp)
else:
for j in range(self.constraints_dict[best_es]["num"]):
c_target = self.constraints_dict[best_es]["vars"][j]
y_new_var = self.target_functions[best_es](
target=c_target,
intervention_levels=np.squeeze(new_interventional_data_x))
y_new_var_noiseless = self.noiseless_target_functions[best_es](
target=c_target,
intervention_levels=np.squeeze(new_interventional_data_x))
# To evaluate the feasibility
tmp = constraints_functions.EVAL_CONSTRAINT_OP[
self.constraints[c_target][0]](y_new_var,
self.constraints[c_target][1])
tmp_noiseless = constraints_functions.EVAL_CONSTRAINT_OP[
self.constraints[c_target][0]](y_new_var_noiseless,
self.constraints[c_target][1])
y_new_c[c_target] = y_new_var
feasibility_list.append(tmp)
feasibility_list_noiseless.append(tmp_noiseless)
if self.verbose:
logging.info("Selected set: %s", best_es)
logging.info("Intervention value: %s", new_interventional_data_x)
logging.info("Outcome: %s", y_new)
logging.info("Feasible: %s", bool(tmp))
# Append new interventional observations to refit the SCM at the next trial
if self.update_scm:
# Generate the full samples we used to compute the output and the
# constraints from the true intervened model. These are then appended to
# the observational data which is then used to refit the functions
interventions = intervention_functions.assign_initial_intervention_level(
exploration_set=best_es,
intervention_level=new_interventional_data_x,
variables=list(self.observational_samples.keys()))
# Sample from the true interventional distribution
out = sampling_utils.sample_scm(
scm_funcs=self.scm_funcs,
graph=None,
interventions=interventions,
n_samples=self.n_samples_per_intervention,
compute_moments=True,
moment=0,
seed=self.sampling_seed)
# Append new observations
self.observational_samples = {
key: np.vstack((self.observational_samples[key], out[key]))
for key in out
}
# Update interventional data for the target
self._get_updated_interventional_data(new_interventional_data_x, y_new,
best_es)
# Update interventional data for the constraints
self._get_updated_interventional_data_constraints(y_new_c, best_es)
# Evaluate cost of intervention
self.per_trial_cost.append(
cost_utils.total_intervention_cost(
best_es,
self.cost_functions,
self.interventional_data_x[best_es],
))
# Store the optimal feasible outcome corresponding to intervention levels
self.outcome_values.append(y_new)
self.feasibility[best_es].append(int(all(feasibility_list)))
self.feasibility_noiseless[best_es].append(
int(all(feasibility_list_noiseless)))
# If the new point is feasible check if it is optimal otherwise discard it
if all(feasibility_list):
      # If the point collected is feasible we need to compare it with the
      # previous best value.
best_value = utilities.EVAL_FN[self.task](y_new,
current_best_global_target)
self.optimal_outcome_values_during_trials.append(best_value)
# Before moving to the next iteration we store the currently found
      # best intervention set, which corresponds to the optimum (min or max,
      # depending on the task) between y_new and current_best_global_target.
new_best_solution = utilities.ARG_EVAL_FN[self.task](
(y_new, current_best_global_target))
self.best_es_over_trials = (best_es,
self.best_es_over_trials)[new_best_solution]
self.best_level_over_trials = (
new_interventional_data_x,
self.best_level_over_trials)[new_best_solution]
else:
# If the current point is not feasible store the previous value
# in this case the best_es_over_trials does not need to change
self.optimal_outcome_values_during_trials.append(
current_best_global_target)
# Store intervened set and whether it is feasible
self.best_es_over_trials_list.append([best_es, int(all(feasibility_list))])
self.best_level_over_trials_list.append(new_interventional_data_x)
# Store the intervention
if len(new_interventional_data_x.shape) != 2:
self.optimal_intervention_levels[best_es][
it] = utilities.make_column_shape_2d(new_interventional_data_x)
else:
self.optimal_intervention_levels[best_es][it] = new_interventional_data_x
# Update the best_es BO model and the related constraints
if self.multi_task_model and self.constraints_dict[best_es]["num"] > 0:
update_model_cls = functools.partial(
self._update_multi_task_model,
causal_prior=self.causal_prior,
graph=self.graph,
mean_function=self.mean_function,
mean_function_constraints=self.mean_function_constraints,
fitted_scm=fitted_scm)
else:
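# With independent (single-task) surrogate models, the target model is
# updated here while the constraint models are updated separately via
# _update_bo_model_constraints.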
update_model_cls = functools.partial(
self._update_bo_model,
data_x=self.interventional_data_x,
data_y=self.interventional_data_y,
mean_functions=self.mean_function,
variance_functions=self.variance_function,
bo_model=self.bo_model)
self._update_bo_model_constraints(best_es)
update_model_cls(
exploration_set=best_es,
fix_likelihood_noise_var=self.fix_likelihood_noise_var,
interventional_limits=self.interventional_constraint_variable_limits,
n_samples_per_intervention=self.n_samples_per_intervention,
hp_prior=self.hp_prior)
if self.verbose:
print(">>> Target model AFTER optimization")
plotting_utils.plot_models(self.bo_model, self.exploration_sets,
self.ground_truth, self.interventional_grids,
self.interventional_data_x,
self.interventional_data_y,
self.multi_task_model)
print(">>> Constraint model AFTER optimization")
plotting_utils.plot_models(
self.bo_model_constraints, self.exploration_sets,
self.ground_truth_constraints, self.interventional_grids,
self.interventional_data_x, self.interventional_data_y_constraints,
self.multi_task_model)
logging.info("Current feasibility: %s", self.feasibility)
logging.info("Current optimal y values: %s",
self.optimal_outcome_values_during_trials)
def _update_sufficient_statistics_constraints(
self, fitted_scm: Callable[[], Any]) -> None:
for es in self.exploration_sets:
for p in self.mean_function_constraints[es].keys():
(
self.mean_function_constraints[es][p],
self.var_function_constraints[es][p],
) = gp_utils.update_sufficient_statistics_hat(
graph=self.graph,
x=es,
y=p,
fitted_scm=fitted_scm,
true_scm_funcs=self.scm_funcs,
seed=self.sampling_seed,
mean_dict_store=self.mean_constraints_dict_store,
var_dict_store=self.var_constraints_dict_store,
n_samples=self.n_samples_per_intervention,
use_true_scm=self.use_true_scm)
def _get_current_feasible_global_target(self) -> Any:
"""Get the current feasible optimal target."""
out = []
feasible_interventions = {}
is_feasible = {}
for es in self.exploration_sets:
feasible_interventions[es] = list(
itertools.compress(self.interventional_data_y[es],
list(map(bool, self.feasibility[es][1:]))))
if not feasible_interventions[es]:
# There are no feasible interventions for es therefore we store the
# initial value we have for that set and denote it as not feasible
feasible_interventions[es] = self.interventional_data_y[es][0]
is_feasible[es] = False
else:
# If the list feasible_interventions[es] is not empty we have at least
# one feasible intervention
is_feasible[es] = True
# Take the optimal value
res = utilities.EVAL_FN[self.task](feasible_interventions[es])
out.append(res)
# Check if there is at least one feasible value. If yes only focus on those.
feasible_results = []
for key, val in is_feasible.items():
if val:
# There is at least one feasible value thus we only take the
# optimum among these values
feasible_results.append(
utilities.EVAL_FN[self.task](feasible_interventions[key])
)
if feasible_results:
out = feasible_results
return utilities.EVAL_FN[self.task](out)[0]
def _get_updated_interventional_data_constraints(
self, y_new_c: Dict[str, float], best_es: Tuple[str, ...]
) -> None:
"""Update interventional data for the constraints."""
for var in list(y_new_c.keys()):
if self.interventional_data_y_constraints[best_es][var] is not None:
# Append the new value
self.interventional_data_y_constraints[best_es][var] = np.append(
self.interventional_data_y_constraints[best_es][var],
np.array(y_new_c[var])[np.newaxis, np.newaxis],
)[:, np.newaxis]
else:
# Assign the first value
self.interventional_data_y_constraints[best_es][var] = np.array(
y_new_c[var]
)[np.newaxis, np.newaxis]
| ccbo-main | methods/ccbo.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""methods init."""
| ccbo-main | methods/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Causal Bayesian Optimisation."""
from __future__ import annotations
import collections
import random
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from absl import logging
from networkx.classes import multidigraph
import numpy as np
from ccbo.methods import cbo
from ccbo.utils import utilities
class Random(cbo.CBO):
"""Causal Bayesian Optimisation class."""
def __init__(
self,
graph: multidigraph.MultiDiGraph,
scm: Any,
make_scm_estimator: Callable[
[
multidigraph.MultiDiGraph,
Dict[Tuple[Optional[Any], ...], Any],
collections.OrderedDict[str, Any],
],
Any,
],
observation_samples: Dict[str, np.ndarray],
intervention_domain: Dict[str, Sequence[float]],
intervention_samples: Optional[
Dict[Tuple[str, ...], Optional[Dict[str, Any]]]
],
exploration_sets: List[Tuple[str, ...]],
number_of_trials: int,
ground_truth: Optional[Sequence[float]] = None,
task: utilities.Task = utilities.Task.MIN,
n_restart: int = 1,
cost_type: int = 1,
hp_prior: bool = True,
num_anchor_points: int = 100,
seed: int = 1,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
causal_prior: bool = True,
verbose: bool = False,
sampling_seed: Optional[int] = None,
noisy_observations: bool = False,
noisy_acquisition: bool = False,
fix_likelihood_noise_var: bool = True,
n_samples_per_intervention: int = 1,
use_prior_mean: bool = False,
size_intervention_grid: int = 100,
use_true_scm: bool = False):
super().__init__(
graph=graph,
scm=scm,
make_scm_estimator=make_scm_estimator,
observation_samples=observation_samples,
intervention_domain=intervention_domain,
intervention_samples=intervention_samples,
exploration_sets=exploration_sets,
task=task,
cost_type=cost_type,
number_of_trials=number_of_trials,
ground_truth=ground_truth,
n_restart=n_restart,
num_anchor_points=num_anchor_points,
verbose=verbose,
seed=seed,
sampling_seed=sampling_seed,
noisy_observations=noisy_observations,
noisy_acquisition=noisy_acquisition,
n_samples_per_intervention=n_samples_per_intervention,
fix_likelihood_noise_var=fix_likelihood_noise_var,
size_intervention_grid=size_intervention_grid,
use_true_scm=use_true_scm,
)
self.sample_anchor_points = sample_anchor_points
self.seed_anchor_points = seed_anchor_points
def run(self) -> None:
"""Run the BO loop."""
# Get current target and best_es
target = self.target_variable
best_es = self.best_initial_es
best_initial_level = self.best_initial_level
for it in range(self.number_of_trials):
logging.info("Trial: %s", it)
if it == 0:
# Store the initial set
self.best_es_over_trials_list.append(best_es)
self.best_level_over_trials_list.append(best_initial_level[0])
# Observe the system
self.observation_trial(best_es, target, it)
else:
# Intervene in the system
self._intervention_trial(target, it)
# Store optimal intervention
self.optimal_intervention = {
"set": self.best_es_over_trials,
"level": self.best_level_over_trials,
"outcome": self.optimal_outcome_values_during_trials[-1]
}
def _select_next_point(self, current_best_global_target: float,
it: int) -> Tuple[Tuple[str, ...], np.ndarray]:
best_es = random.choice(self.exploration_sets)
new_interventional_data_x = random.choice(
self.interventional_grids[best_es])
# Reshape point
if len(new_interventional_data_x.shape) == 1 and len(best_es) == 1:
new_interventional_data_x = utilities.make_column_shape_2d(
new_interventional_data_x)
elif len(best_es) > 1 and len(new_interventional_data_x.shape) == 1:
new_interventional_data_x = new_interventional_data_x.reshape(1, -1)
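# Ensure columns correspond to the intervened variables; transpose otherwise.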
if new_interventional_data_x.shape[1] != len(best_es):
new_interventional_data_x = np.transpose(new_interventional_data_x)
else:
raise ValueError("The new point is not an array.")
return best_es, new_interventional_data_x
| ccbo-main | methods/random.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Causal Bayesian Optimisation."""
from __future__ import annotations
import collections
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from absl import logging
from networkx.classes import multidigraph
import numpy as np
from ccbo.acquisitions import evaluate_acquisitions
from ccbo.methods import base
from ccbo.utils import scm_utils
from ccbo.utils import utilities
class CBO(base.BaseMethod):
"""Causal Bayesian Optimisation class."""
def __init__(
self,
graph: multidigraph.MultiDiGraph,
scm: Any,
make_scm_estimator: Callable[
[
multidigraph.MultiDiGraph,
Dict[Tuple[Optional[Any], ...], Any],
collections.OrderedDict[str, Any],
],
Any,
],
observation_samples: Dict[str, np.ndarray],
intervention_domain: Dict[str, Sequence[float]],
intervention_samples: Optional[
Dict[Tuple[str, ...], Optional[Dict[str, Any]]]
],
exploration_sets: List[Tuple[str, ...]],
number_of_trials: int,
ground_truth: Optional[Dict[Tuple[str, ...], Any]] = None,
task: utilities.Task = utilities.Task.MIN,
n_restart: int = 1,
cost_type: int = 1,
hp_prior: bool = True,
num_anchor_points: int = 100,
seed: int = 1,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
causal_prior: bool = True,
verbose: bool = False,
sampling_seed: Optional[int] = None,
noisy_observations: bool = False,
noisy_acquisition: bool = False,
fix_likelihood_noise_var: bool = True,
n_samples_per_intervention: int = 1,
use_prior_mean: bool = False,
size_intervention_grid: int = 100,
use_true_scm: bool = False):
super().__init__(
graph=graph,
scm=scm,
make_scm_estimator=make_scm_estimator,
observation_samples=observation_samples,
intervention_domain=intervention_domain,
intervention_samples=intervention_samples,
exploration_sets=exploration_sets,
task=task,
cost_type=cost_type,
number_of_trials=number_of_trials,
ground_truth=ground_truth,
n_restart=n_restart,
num_anchor_points=num_anchor_points,
verbose=verbose,
causal_prior=causal_prior,
seed=seed,
sampling_seed=sampling_seed,
noisy_observations=noisy_observations,
noisy_acquisition=noisy_acquisition,
n_samples_per_intervention=n_samples_per_intervention,
fix_likelihood_noise_var=fix_likelihood_noise_var,
size_intervention_grid=size_intervention_grid,
use_prior_mean=use_prior_mean,
use_true_scm=use_true_scm,
hp_prior=hp_prior)
self.sample_anchor_points = sample_anchor_points
self.seed_anchor_points = seed_anchor_points
# Fit Gaussian processes to functions in the SCM
self.fitted_scm_fncs = scm_utils.fit_scm_fncs(self.graph,
self.observational_samples,
self.scm_funcs,
self.n_restart)
def run(self) -> None:
"""Run the BO loop."""
# Get current target and best_es
target = self.target_variable
best_es = self.best_initial_es
best_initial_level = self.best_initial_level
for it in range(self.number_of_trials):
logging.info("Trial: %s", it)
if it == 0:
# Store the initial set
self.best_es_over_trials_list.append(best_es)
self.best_level_over_trials_list.append(best_initial_level[0])
# Observe the system
self.observation_trial(best_es, target, it)
else:
# Intervene in the system
self._intervention_trial(target, it)
# Store optimal intervention
self.optimal_intervention = {
"set": self.best_es_over_trials,
"level": self.best_level_over_trials,
"outcome": self.optimal_outcome_values_during_trials[-1]
}
def _update_all_surrogate_models(self)-> None:
"""Update surrogate models (GPs) on the target functions."""
for es in self.exploration_sets:
if (self.interventional_data_x[es] is not None and
self.interventional_data_y[es] is not None):
self._update_bo_model(
data_x=self.interventional_data_x,
data_y=self.interventional_data_y,
mean_functions=self.mean_function,
variance_functions=self.variance_function,
bo_model=self.bo_model,
exploration_set=es,
hp_prior=self.hp_prior,
fix_likelihood_noise_var=self.fix_likelihood_noise_var,
interventional_limits=self.interventional_variable_limits,
n_samples_per_intervention=self.n_samples_per_intervention)
def _intervention_trial(self, target: str, it: int)-> None:
"""Run one intervention trial of the BO loop."""
# Update surrogate models if we observed at the previous trial
if self.trial_type[-1] == utilities.Trial.OBSERVATION:
self._update_all_surrogate_models()
# Run the actual per trial computation
self._per_trial_computations(it, target)
def observation_trial(
self,
best_es: Tuple[str, ...],
target: str,
it: int = 0,
observation_cost: float = 0.0) -> Callable[
[multidigraph.MultiDiGraph, Dict[Tuple[Optional[Any], ...],
Any]], Any]:
"""Run one observation trial of the BO loop."""
self.trial_type.append(utilities.Trial.OBSERVATION)
# Given the fitted functions construct an approximate simulator
fitted_scm = self.make_scm_estimator(self.graph, self.fitted_scm_fncs,
self.scm_funcs)
# Create mean functions and var functions (prior parameters) for the GPs
# on the target functions. This is done using the observational data.
self._update_sufficient_statistics(target=target, fitted_scm=fitted_scm)
# Store the current optimal value. As we have observed at this trial
# we don't have a new value so we store the previous one.
self.optimal_outcome_values_during_trials.append(
self.outcome_values[-1])
if self.interventional_data_x[best_es] is None:
self.optimal_intervention_levels[best_es][it] = np.nan
# Store the cost of observing which is assumed to be zero
self.per_trial_cost.append(observation_cost)
return fitted_scm
def _evaluate_acquisition_functions(self, current_best_global_target: float,
it: int) -> None:
"""Evaluate the acquisition function given the surrogate models."""
for es in self.exploration_sets:
# Get the GP model for es
if (
self.interventional_data_x[es] is not None
and self.interventional_data_y[es] is not None
):
bo_model = self.bo_model[es]
else:
bo_model = None
# The seed of the anchor points is used when the points at which
# to evaluate the acquisition functions are sampled uniformly
if self.seed_anchor_points is None:
seed_to_pass = None
else:
# Use a fixed seed for reproducibility but ensure the anchor points
# used for optimization of the acquisition functions are different
seed_to_pass = int(self.seed_anchor_points * it)
(
self.y_acquired[es],
self.corresponding_x[es],
) = evaluate_acquisitions.evaluate_acquisition_function(
self.intervention_exploration_domain[es],
bo_model,
self.mean_function[es],
self.variance_function[es],
current_best_global_target,
es,
self.cost_functions,
self.task,
self.target_variable,
noisy_acquisition=self.noisy_acquisition,
num_anchor_points=self.num_anchor_points,
sample_anchor_points=self.sample_anchor_points,
seed_anchor_points=seed_to_pass,
verbose=self.verbose)
def _select_next_point(self, current_best_global_target: float,
it: int) -> Tuple[Tuple[str, ...], np.ndarray]:
# Compute acquisition function given the updated BO models for the
# interventional data. Notice that we use current_best_global_target
# and the costs to compute the acquisition functions.
self._evaluate_acquisition_functions(current_best_global_target, it)
# Best exploration set based on acquired target-values.
# Notice that independently on the maximization or minimization task
# here we always need to optimize to select the point giving the maximum of
# the expected improvement
best_es = max(self.y_acquired, key=self.y_acquired.get)
# Get the corresponding intervention value for best_es
new_interventional_data_x = self.corresponding_x[best_es]
self._check_new_point(best_es)
return best_es, new_interventional_data_x
| ccbo-main | methods/cbo.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract class for Bayesian optimization methods."""
from __future__ import annotations
import abc
import collections
import copy
import functools
import random
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from absl import logging
from emukit.model_wrappers import gpy_model_wrappers
from GPy import core
from GPy import models
from GPy.core.parameterization import priors
from GPy.kern.src import rbf
from networkx.classes import multidigraph
import numpy as np
from ccbo.kernels import causal_kernel
from ccbo.scm_examples import base
from ccbo.utils import cost_utils
from ccbo.utils import gp_utils
from ccbo.utils import initialisation_utils
from ccbo.utils import intervention_functions
from ccbo.utils import plotting_utils
from ccbo.utils import utilities
class BaseMethod(abc.ABC):
"""Base class with common methods and variables for all BO methods."""
def __init__(
self,
graph: multidigraph.MultiDiGraph,
scm: base.ScmExample,
observation_samples: Dict[str, np.ndarray],
intervention_domain: Dict[str, Sequence[float]],
exploration_sets: List[Tuple[str, ...]],
intervention_samples: Optional[Dict[Tuple[str, ...],
Dict[str, Any]]] = None,
make_scm_estimator: Optional[Callable[[
multidigraph.MultiDiGraph, Dict[Tuple[Optional[Any], ...], Any],
collections.OrderedDict[str, Any]], Any]] = None,
task: utilities.Task = utilities.Task.MIN,
cost_type: int = 1,
number_of_trials: int = 10,
ground_truth: Optional[Dict[Tuple[str, ...], Any]] = None,
n_restart: int = 1,
num_anchor_points: int = 100,
verbose: bool = False,
size_intervention_grid: int = 100,
causal_prior: bool = True,
seed: int = 1,
sampling_seed: Optional[int] = None,
noisy_observations: bool = False,
noisy_acquisition: bool = False,
n_samples_per_intervention: int = 1,
fix_likelihood_noise_var: bool = True,
gt_samples: int = 100,
hp_prior: bool = True,
use_prior_mean: bool = False,
use_true_scm: bool = False):
# Set the seed used for GP optimization
self.seed = seed
# Set the seed used for sampling from the estimated SCM and for sampling
# the values of the target functions when assuming noisy observations
self.sampling_seed = sampling_seed
# The true SCM is used in the target function evaluation
self.scm_funcs = scm.scm_funcs
# Build estimator for the function in the SCM
self.make_scm_estimator = make_scm_estimator
# Get the DAG
self.graph = graph
# Number of optimization restart for GPs
self.n_restart = n_restart
# Observational data
self.observational_samples = observation_samples
# List of all variables names
self.all_vars = list(self.observational_samples.keys())
# Target variable
self.target_variable = scm.get_target_name()
# Number of trials for BO loop
self.number_of_trials = number_of_trials
# Initialise optimal value according to min / max objective function
self.task = task
self.init_val = (
np.inf if self.task.value == utilities.Task.MIN.value else -np.inf
)
# Manipulative variables
self.manipulative_variables = [
list(scm.variables.keys())[i]
for i, val in enumerate(list(scm.variables.values()))
if utilities.VariableType.MANIPULATIVE.value in val
]
# Interventional domain
self.interventional_variable_limits = intervention_domain
assert self.manipulative_variables == list(intervention_domain.keys())
# Intervention sets to explore
if exploration_sets:
assert isinstance(exploration_sets, list)
self.exploration_sets = exploration_sets
else:
self.exploration_sets = list(
initialisation_utils.powerset(self.manipulative_variables))
# Get the interventional grids for plotting
self.interventional_grids = (
initialisation_utils.get_interventional_grids(
self.exploration_sets,
intervention_domain,
size_intervention_grid=size_intervention_grid))
# Whether to use observational data to build the prior
self.causal_prior = causal_prior
# Use the estimated causal effect as prior mean of the surrogate model
self.use_prior_mean = use_prior_mean
# Objective function params.
self.bo_model = {es: None for es in self.exploration_sets}
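# This {es: None} template is deep-copied below to initialise several other
# per-exploration-set containers (target functions, mean/variance functions,
# interventional data holders).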
# Target functions for Bayesian optimisation - ground truth.
self.target_functions = copy.deepcopy(self.bo_model)
# Initialize a dictionary to store the noiseless values of the target
# functions is dealing with noisy observations
self.noiseless_target_functions = copy.deepcopy(self.bo_model)
# Store true objective function.
self.ground_truth = ground_truth
# Number of points where to evaluate acquisition function.
self.num_anchor_points = num_anchor_points
# Hyperparameters for GPs assigned during optimisation.
self.mean_function = copy.deepcopy(self.bo_model)
self.variance_function = copy.deepcopy(self.bo_model)
# Store dicts for mean and var values computed in the acquisition function.
self.mean_dict_store = {es: {} for es in self.exploration_sets}
self.var_dict_store = copy.deepcopy(self.mean_dict_store)
# Initial optimal solutions
if intervention_samples:
# If initial interventional data is provided
(
initial_optimal_intervention_sets,
initial_optimal_target_values,
initial_optimal_intervention_levels,
self.interventional_data_x,
self.interventional_data_y,
) = initialisation_utils.initialise_interventional_objects(
self.exploration_sets,
intervention_samples,
target=self.target_variable,
task=utilities.EVAL_FN[self.task])
else:
# No initial interventional data is provided
initial_optimal_intervention_sets = random.choice(self.exploration_sets)
initial_optimal_target_values = None
initial_optimal_intervention_levels = None
self.interventional_data_x = copy.deepcopy(self.bo_model)
self.interventional_data_y = copy.deepcopy(self.bo_model)
# Store the outcome values found so far, initialised from the initial optimal
# target values (or init_val if no interventional data is available)
self.outcome_values = (
initialisation_utils.initialise_global_outcome_dict_new(
initial_optimal_target_values, self.init_val
)
)
# Initialize list to store the optimal outcome values
self.optimal_outcome_values_during_trials = []
# Set the observations to be noisy or noiseless evaluations of the target
self.noisy_observations = noisy_observations
# Set the acquisition function to be the noisy version
self.noisy_acquisition = noisy_acquisition
self.optimal_intervention_levels = (
initialisation_utils.initialise_optimal_intervention_level_list(
self.exploration_sets,
initial_optimal_intervention_sets,
initial_optimal_intervention_levels,
number_of_trials,
)
)
self.best_initial_es = initial_optimal_intervention_sets[0]
self.best_initial_level = initial_optimal_intervention_levels
self.best_es_over_trials_list = []
self.best_level_over_trials_list = []
# Whether to learn the variance of the likelihood noise or fix it to a given value
self.fix_likelihood_noise_var = fix_likelihood_noise_var
# Set the number of samples from the interventional distribution we get
# every time we perform an intervention. This is used when
# noisy_observations = True. When instead noisy_observations = False there
# is no randomness in the samples thus we only need one.
self.n_samples_per_intervention = n_samples_per_intervention
# Store true target function to simulate interventions for each set
for es in self.exploration_sets:
self.target_functions[
es] = intervention_functions.evaluate_target_function(
self.scm_funcs, es, self.all_vars, self.noisy_observations,
self.sampling_seed, self.n_samples_per_intervention)
# Store the noiseless target functions so we can evaluate a posteriori whether
# the algorithm collected feasible interventions. This is used when
# gt_samples differs from self.n_samples_per_intervention so that every
# experiment gives a noisy evaluation of the target and the constraints
for es in self.exploration_sets:
self.noiseless_target_functions[
es] = intervention_functions.evaluate_target_function(
self.scm_funcs, es, self.all_vars, self.noisy_observations,
self.sampling_seed, gt_samples)
# Parameter space for optimisation
self.intervention_exploration_domain = (
initialisation_utils.create_intervention_exploration_domain(
self.exploration_sets, intervention_domain))
# Optimisation specific parameters to initialise
self.trial_type = [] # If we observed or intervened during the trial
self.cost_functions = cost_utils.define_costs(self.manipulative_variables,
cost_type)
self.per_trial_cost = []
# Acquisition function specifics
self.y_acquired = {es: None for es in self.exploration_sets}
self.corresponding_x = copy.deepcopy(self.y_acquired)
# Initialise best intervention set and best intervention level over trials
self.best_es_over_trials = self.best_initial_es
self.best_level_over_trials = self.best_initial_level
# Initialise the variable for storing the optimal intervention
self.optimal_intervention = None
# Use hyperprior on the hyperparameters of the GP model
self.hp_prior = hp_prior
# Debugging
self.verbose = verbose
self.use_true_scm = use_true_scm
def _update_bo_model(
self,
data_x: Any,
data_y: Dict[Tuple[str, ...], np.ndarray],
mean_functions: Dict[
Optional[Tuple[str, ...]], Callable[[np.ndarray], np.ndarray]
],
variance_functions: Dict[
Optional[Tuple[str, ...]], Callable[[np.ndarray], np.ndarray]
],
bo_model: Dict[
Optional[Tuple[str, ...]],
Optional[gpy_model_wrappers.GPyModelWrapper],
],
exploration_set: Tuple[str, ...],
n_samples_per_intervention: int,
alpha: float = 2,
beta: float = 0.5,
beta_l: float = 1.5,
lengthscale: float = 1.0,
variance: float = 1.0,
fix_likelihood_noise_var: bool = True,
interventional_limits: Optional[Dict[str, Sequence[float]]] = None,
ard: bool = False,
hp_prior: bool = True,
intervention_set=None,
) -> None:
"""Update GP model on causal effect for exploration_set."""
# Check data for the model exist
assert data_y[exploration_set] is not None
# Get the data
x = data_x[exploration_set] if isinstance(data_x, dict) else data_x
y = data_y[exploration_set]
input_dim = len(intervention_set) if intervention_set else len(
exploration_set)
# Set the likelihood noise variance inversely proportional to the number of
# interventional samples we get after each experiment
lik_noise_var = (1./n_samples_per_intervention)
partial_model = functools.partial(
models.GPRegression, X=x, Y=y, noise_var=lik_noise_var)
# Specify mean function
if not self.use_prior_mean:
mf = None
else:
mf = core.Mapping(input_dim, 1)
mf.f = mean_functions[exploration_set]
mf.update_gradients = lambda a, b: None
# Initialize the model
if self.causal_prior:
# Set kernel
kernel = causal_kernel.CausalRBF(
input_dim=input_dim,
variance_adjustment=variance_functions[exploration_set],
lengthscale=lengthscale,
variance=variance,
ard=ard)
else:
kernel = rbf.RBF(input_dim, lengthscale=lengthscale, variance=variance)
model = partial_model(kernel=kernel, mean_function=mf)
# Place a prior on kernel hyperparameters to get a MAP
if hp_prior:
# Numerical stability issues
# see https://github.com/SheffieldML/GPy/issues/735
gamma = priors.Gamma(a=alpha, b=beta)
model.kern.variance.set_prior(gamma)
if interventional_limits:
# We set the hyperparameter for the GP lengthscale looking at the
# interventional grid for each variable included in the inputs of the GP
alpha_l = gp_utils.get_lenghscale_hp(exploration_set,
interventional_limits)
gamma = priors.Gamma(a=alpha_l, b=beta_l)
model.kern.lengthscale.set_prior(gamma)
if fix_likelihood_noise_var:
# Fix likelihood variance to a very small value
model.likelihood.variance.fix(1e-5)
if self.verbose:
print("Optimizing the model for:", exploration_set)
print("Model BEFORE optimizing:", model)
# Prevent randomization from affecting the optimization of the GPs
old_seed = np.random.get_state()
np.random.seed(self.seed)
# With num_restarts we repeat the optimization multiple times and pick the
# hyperparameters giving the highest likelihood
model.optimize_restarts(num_restarts=self.n_restart)
np.random.set_state(old_seed)
if self.verbose:
print("Model AFTER optimizing:", model)
# Assign the model to the exploration set
bo_model[exploration_set] = gpy_model_wrappers.GPyModelWrapper(model)
# Avoid numerical issues due to the optimization of the kernel hyperparameters
self._safe_optimization(bo_model[exploration_set])
def _select_next_point(self, *args) -> Tuple[Tuple[str, ...], np.ndarray]:
raise NotImplementedError(
"_select_next_point method has not been implemented for"
"this class")
def _check_new_point(self, best_es: Tuple[str, ...]) -> None:
"""Check that new intervention point is in the intervention domain."""
assert best_es is not None, (best_es, self.y_acquired)
assert best_es in self.exploration_sets
assert self.intervention_exploration_domain[best_es].check_points_in_domain(
self.corresponding_x[best_es])[0], (
best_es,
self.y_acquired,
self.corresponding_x,
)
def _safe_optimization(self,
bo_model: gpy_model_wrappers.GPyModelWrapper,
bound_var=1e-02,
bound_len=20.0) -> None:
"""Avoid numerical instability in the optimization of the GP hyperpars."""
if bo_model.model.kern.variance[0] < bound_var: # pytype: disable=attribute-error
bo_model.model.kern.variance[0] = 1.0 # pytype: disable=attribute-error
if bo_model.model.kern.lengthscale[0] > bound_len: # pytype: disable=attribute-error
bo_model.model.kern.lengthscale[0] = 1.0 # pytype: disable=attribute-error
def _get_updated_interventional_data(self, x_new: np.ndarray, y_new: float,
best_es: Tuple[str, ...]) -> None:
"""Updates interventional data."""
data_x, data_y = utilities.check_reshape_add_data(
self.interventional_data_x, self.interventional_data_y, x_new, y_new,
best_es)
self.interventional_data_x[best_es] = data_x
self.interventional_data_y[best_es] = data_y
def _update_sufficient_statistics(
self, target: str, fitted_scm: Callable[[], Any]) -> None:
"""Update mean and variance functions of the causal prior (GP).
Args:
target : The full node name of the target variable.
fitted_scm : Fitted SCM.
"""
for es in self.exploration_sets:
(self.mean_function[es],
self.variance_function[es]) = gp_utils.update_sufficient_statistics_hat(
graph=self.graph,
y=target,
x=es,
fitted_scm=fitted_scm,
true_scm_funcs=self.scm_funcs,
seed=self.sampling_seed,
mean_dict_store=self.mean_dict_store,
var_dict_store=self.var_dict_store,
n_samples=self.n_samples_per_intervention,
use_true_scm=self.use_true_scm)
def _per_trial_computations(self, it: int, target: str) -> None:
"""Performs computations for each trial iteration for specific target."""
logging.info(">>>")
logging.info("Iteration: %s", it)
logging.info("<<<")
if self.verbose:
print(">>> Target model BEFORE optimization")
plotting_utils.plot_models(self.bo_model, self.exploration_sets,
self.ground_truth, self.interventional_grids,
self.interventional_data_x,
self.interventional_data_y)
# Find the current optimal value of the target Y
current_best_global_target = utilities.EVAL_FN[self.task](
self.outcome_values)
if self.verbose:
logging.info("Current_best_global_target: %s", current_best_global_target)
# Indicate that in this trial we are explicitly intervening in the system
self.trial_type.append(utilities.Trial.INTERVENTION)
best_es, new_interventional_data_x = self._select_next_point(
current_best_global_target, it)
# Get the corresponding outcome values for best_es
y_new = self.target_functions[best_es](
target, np.squeeze(new_interventional_data_x))
# Store intervened set
self.best_es_over_trials_list.append(best_es)
self.best_level_over_trials_list.append(new_interventional_data_x)
if self.verbose:
logging.info("Selected set: %s", best_es)
logging.info("Intervention value: %s", new_interventional_data_x)
logging.info("Outcome: %s", y_new)
# Update interventional data
self._get_updated_interventional_data(new_interventional_data_x, y_new,
best_es)
# Evaluate cost of intervention
self.per_trial_cost.append(
cost_utils.total_intervention_cost(
best_es,
self.cost_functions,
self.interventional_data_x[best_es],
))
# Store optimal outcome values
self.outcome_values.append(y_new)
self.optimal_outcome_values_during_trials.append(
utilities.EVAL_FN[self.task](y_new, current_best_global_target))
new_best_solution = utilities.ARG_EVAL_FN[self.task](
(y_new, current_best_global_target))
self.best_es_over_trials = (best_es,
self.best_es_over_trials)[new_best_solution]
self.best_level_over_trials = (
new_interventional_data_x,
self.best_level_over_trials)[new_best_solution]
# Store the intervention
if len(new_interventional_data_x.shape) != 2:
self.optimal_intervention_levels[best_es][
it] = utilities.make_column_shape_2d(new_interventional_data_x)
else:
self.optimal_intervention_levels[best_es][it] = new_interventional_data_x
# Update the BO model for best_es
self._update_bo_model(
data_x=self.interventional_data_x,
data_y=self.interventional_data_y,
mean_functions=self.mean_function,
variance_functions=self.variance_function,
bo_model=self.bo_model,
exploration_set=best_es,
hp_prior=self.hp_prior,
fix_likelihood_noise_var=self.fix_likelihood_noise_var,
interventional_limits=self.interventional_variable_limits,
n_samples_per_intervention=self.n_samples_per_intervention)
if self.verbose:
print(">>> Target model AFTER optimization")
plotting_utils.plot_models(self.bo_model, self.exploration_sets,
self.ground_truth, self.interventional_grids,
self.interventional_data_x,
self.interventional_data_y)
| ccbo-main | methods/base.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experiments.run_optimization."""
import unittest
import numpy as np
from ccbo.experiments import data
from ccbo.methods import cbo
from ccbo.methods import ccbo
from ccbo.utils import initialisation_utils
from ccbo.utils import sampling_utils
from ccbo.utils import scm_utils
class RunOptimizationTest(unittest.TestCase):
def test_run_optimization(self):
example = data.EXAMPLES_DICT["synthetic1"]()
scm = example.structural_causal_model(
variables=("X", "Z"), lambdas=(1., 2.))
constraints = scm.constraints
graph = scm.graph
exploration_sets = (("X",), ("Z",))
intervention_domain = {"X": [-3, 2], "Z": [-1, 1]}
precision = 5
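# Expected optimal outcome values over the three trials for each method,
# compared against model.optimal_outcome_values_during_trials below.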
expected_values = {
"cbo":
np.array([-0.27992, -0.27992, -0.39242]),
"ccbo_single_task":
np.array([-0.27992, -0.39242, -0.39242]),
"ccbo_single_task_causal_prior":
np.array([-0.27992, -0.91705, -0.91705]),
"ccbo_multi_task":
np.array([-0.27992, -0.29237, -0.39242]),
"ccbo_multi_task_causal_prior":
np.array([-0.27992, -0.39242, -0.39242]),
"ccbo_dag_multi_task":
np.array([-0.27992, -0.39242, -0.39242])
}
# Generate observational data by sampling from the true
# observational distribution
d_o = sampling_utils.sample_scm(
scm_funcs=scm.scm_funcs,
graph=None,
n_samples=5,
compute_moments=False,
seed=1)
# Generate interventional data
d_i = {k: None for k in exploration_sets}
for var, level in zip(exploration_sets, ((1.,), (0.,))):
initialisation_utils.assign_interventions(
variables=var,
levels=level,
n_samples_per_intervention=100,
sampling_seed=1,
d_i=d_i,
graph=graph,
scm_funcs=scm.scm_funcs)
for model_name in expected_values:
use_causal_prior = model_name in [
"ccbo_single_task_causal_prior", "ccbo_dag_multi_task"
]
is_multi_task = model_name in [
"ccbo_multi_task", "ccbo_multi_task_causal_prior",
"ccbo_dag_multi_task"
]
use_prior_mean = model_name in ["ccbo_single_task_causal_prior",
"ccbo_multi_task_causal_prior",
"ccbo_dag_multi_task"]
# Setup input params
input_params = {
"graph": graph,
"scm": scm,
"make_scm_estimator": scm_utils.build_fitted_scm,
"exploration_sets": list(exploration_sets),
"observation_samples": d_o,
"intervention_samples": d_i,
"intervention_domain": intervention_domain,
"number_of_trials": 3,
"sample_anchor_points": False,
"num_anchor_points": 5,
"sampling_seed": 1,
"n_restart": 1,
"causal_prior": use_causal_prior,
"hp_prior": True,
# Noisy observations
"noisy_observations": True,
"n_samples_per_intervention": 100
}
if model_name == "cbo":
model = cbo.CBO(**input_params)
else:
# Add constraints
input_params["constraints"] = constraints
input_params["multi_task_model"] = is_multi_task
input_params["use_prior_mean"] = use_prior_mean
if model_name == "ccbo_dag_multi_task":
# Monte Carlo construction of the kernel
input_params["n_kernel_samples"] = 10
model = ccbo.CCBO(**input_params)
# Run method
model.run()
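# Check that the optimisation trajectory matches the stored values.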
np.testing.assert_array_almost_equal(
model.optimal_outcome_values_during_trials,
expected_values[model_name], precision)
if __name__ == "__main__":
unittest.main()
| ccbo-main | experiments/run_optimization_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for Synthetic1 experiment."""
import ml_collections
def get_config():
"""Return the default configuration."""
config = ml_collections.ConfigDict()
# Name associated with this SCM
config.example_name = 'synthetic1'
config.n_trials = 50 # Number of trials to run.
config.n_samples_obs = 100 # Number of initial observational data points.
# Number of samples per interventional distribution.
config.n_samples_per_intervention = 100
# Number of samples used to compute the ground truth function.
config.n_samples_ground_truth = 100
# Seed to use to sample the anchor points.
config.seed_anchor_points = 1
# Use a regular grid of points to evaluate the acquisition function
# or sample points uniformly.
config.sample_anchor_points = False
# Number of points on a regular grid to evaluate the acquisition function.
config.n_grid_points = 100
# Whether to learn or fix the likelihood noise variance in the GP model.
config.fix_likelihood_noise_var = True
# Whether to use a noisy version of the acquisition function.
config.noisy_acquisition = False
config.intervention_variables = (('X',), ('Z',)) # Intervention variables.
config.intervention_levels = ((1.,), (0.,)) # Intervention values.
config.constraints = ml_collections.ConfigDict()
config.constraints.variables = ('X', 'Z')
config.constraints.lambdas = (1., 1.) # Constraint values for 'X', 'Z'.
config.exploration_sets = (('X',), ('Z',)) # Exploration sets
# Whether to add an RBF kernel to the Monte Carlo one.
config.add_rbf_kernel = False
# Whether to update the SCM at every iteration for G-MTGP
config.update_scm = False
# Use hp_prior in kernel
config.use_hp_prior = True
# Number of samples for the kernel computation
config.n_kernel_samples = 10
# Specify which model to run with possible values:
# "cbo", "ccbo_single_task", "ccbo_single_task_causal_prior",
# "ccbo_multi_task", "ccbo_multi_task_causal_prior", "ccbo_dag_multi_task"
config.model_name = 'ccbo_single_task'
return config
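# Minimal usage sketch (an assumption, not part of the original file): the
# experiment is launched with absl's config_flags as in run_optimization.py,
# and individual fields are overridden with the standard ml_collections
# --config.<field> syntax:
#   python -m ccbo.experiments.run_optimization \
#     --config=ccbo/experiments/config_synthetic1.py \
#     --config.model_name=ccbo_multi_task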
| ccbo-main | experiments/config_synthetic1.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run experiment."""
from __future__ import annotations
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_flags
from ccbo.experiments import data
from ccbo.methods import cbo
from ccbo.methods import ccbo
from ccbo.methods import random
from ccbo.utils import constraints_functions
from ccbo.utils import initialisation_utils
from ccbo.utils import sampling_utils
from ccbo.utils import scm_utils
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file("config",
"ccbo/experiments/config_synthetic1.py")
### FIXED PARAMETERS ###
# Sampling seed for the ground truth and the sampling of the target function
sampling_seed = 1
# Whether to use noisy observations of the target and the constraints
noisy_observations = True
# Produce plot and print statistics
verbose = False
# Number of restarts of GP optimization
n_restart = 5
def main(_):
flags_config = FLAGS.config
logging.info("Flags dict is %s", flags_config)
### MISCELLANEOUS PREPARATION ###
example = data.EXAMPLES_DICT[flags_config.example_name]()
scm = example.structural_causal_model(**flags_config.constraints)
graph = scm.graph
constraints = scm.constraints
(_, _, intervention_domain, all_ce,
_, constraints_values, _,
_, _,
_) = scm.setup(
n_grid_points=flags_config.n_grid_points,
exploration_sets=list(flags_config.exploration_sets),
n_samples=flags_config.n_samples_ground_truth,
sampling_seed=sampling_seed)
### GENERATE INITIAL DATA ###
# Generate observational data by sampling from the true
# observational distribution
d_o = sampling_utils.sample_scm(
scm_funcs=scm.scm_funcs,
graph=None,
n_samples=flags_config.n_samples_obs,
compute_moments=False,
seed=sampling_seed)
# Generate interventional data
d_i = {k: None for k in flags_config.exploration_sets}
for var, level in zip(flags_config.intervention_variables,
flags_config.intervention_levels):
initialisation_utils.assign_interventions(
variables=var,
levels=level,
n_samples_per_intervention=flags_config.n_samples_per_intervention,
sampling_seed=sampling_seed,
d_i=d_i,
graph=graph,
scm_funcs=scm.scm_funcs)
### RUN THE ALGORITHM ###
model_name = flags_config.model_name
use_causal_prior = model_name in [
"ccbo_single_task_causal_prior", "ccbo_dag_multi_task"
]
is_multi_task = model_name in [
"ccbo_multi_task", "ccbo_multi_task_causal_prior", "ccbo_dag_multi_task"
]
use_prior_mean = model_name in ["ccbo_single_task_causal_prior",
"ccbo_multi_task_causal_prior",
"ccbo_dag_multi_task"]
add_rbf_kernel = (
flags_config.add_rbf_kernel and model_name in ["ccbo_dag_multi_task"])
update_scm = flags_config.update_scm and model_name in ["ccbo_dag_multi_task"]
# Setup input params
input_params = {
"graph": graph,
"scm": scm,
"make_scm_estimator": scm_utils.build_fitted_scm,
"exploration_sets": list(flags_config.exploration_sets),
"observation_samples": d_o,
"intervention_samples": d_i,
"intervention_domain": intervention_domain,
"number_of_trials": flags_config.n_trials,
"sample_anchor_points": flags_config.sample_anchor_points,
"seed_anchor_points": flags_config.seed_anchor_points,
"num_anchor_points": flags_config.n_grid_points,
"ground_truth": all_ce,
"sampling_seed": sampling_seed,
"n_restart": n_restart,
"verbose": verbose,
"causal_prior": use_causal_prior,
"hp_prior": flags_config.use_hp_prior,
# Noisy observations
"noisy_observations": noisy_observations,
"noisy_acquisition": flags_config.noisy_acquisition,
"n_samples_per_intervention": flags_config.n_samples_per_intervention,
"fix_likelihood_noise_var": flags_config.fix_likelihood_noise_var
}
if model_name == "cbo":
model = cbo.CBO(**input_params)
elif model_name == "random":
model = random.Random(**input_params)
else:
# Add constraints
input_params["ground_truth_constraints"] = constraints_values
input_params["constraints"] = constraints
input_params["multi_task_model"] = is_multi_task
input_params["use_prior_mean"] = use_prior_mean
input_params["update_scm"] = update_scm
input_params["add_rbf_kernel"] = add_rbf_kernel
if model_name == "ccbo_dag_multi_task":
# Monte Carlo construction of the kernel
input_params["n_kernel_samples"] = flags_config.n_kernel_samples
model = ccbo.CCBO(**input_params)
# Run method
model.run()
# If model is not constrained compute feasibility after running it
if model_name in ["random", "cbo"]:
(constraints_dict, _, _, _,
_) = constraints_functions.get_constraints_dicts(
flags_config.exploration_sets, constraints, graph,
model.target_variable, d_o)
for i, v in enumerate(model.best_es_over_trials_list):
if len(v) > 1:
value = model.best_level_over_trials_list[i].tolist()[0]
else:
value = model.best_level_over_trials_list[i]
is_feasible, _, _, _ = constraints_functions.verify_feasibility(
optimal_unconstrained_set=v,
optimal_level=value,
exploration_sets=flags_config.exploration_sets,
all_ce=all_ce,
constraints=constraints,
constraints_dict=constraints_dict,
scm_funcs=scm.scm_funcs,
graph=graph,
dict_variables=scm.variables,
interventional_grids=model.interventional_grids,
n_samples=flags_config.n_samples_ground_truth,
sampling_seed=sampling_seed,
task=model.task,
)
model.best_es_over_trials_list[i] = [v, int(is_feasible)]
logging.info(
"The optimal intervention set found by the algorithm is %s",
model.best_es_over_trials_list[-1][0],
)
logging.info(
"The optimal target effect value found by the algorithm is %s",
model.optimal_outcome_values_during_trials[-1],
)
if __name__ == "__main__":
app.run(main)
| ccbo-main | experiments/run_optimization.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""experiments init."""
| ccbo-main | experiments/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for Synthetic2 experiment."""
import ml_collections
def get_config():
"""Return the default configuration for synthetic2 (Fig 1(d) in the paper) example."""
config = ml_collections.ConfigDict()
# Name associated with this SCM
config.example_name = 'synthetic2'
config.n_trials = 60 # Number of trials to run.
config.n_samples_obs = 100 # Number of initial observational data points.
# Number of samples per interventional distribution.
config.n_samples_per_intervention = 100
# Number of samples used to compute the ground truth function.
config.n_samples_ground_truth = 100
# Seed to use to sample the anchor points.
config.seed_anchor_points = 1
# Use a regular grid of points to evaluate the acquisition function
# or sample points uniformly.
config.sample_anchor_points = False
# Number of points on a regular grid to evaluate the acquisition function.
config.n_grid_points = 100
# Whether to learn or fix the likelihood noise variance in the GP model.
config.fix_likelihood_noise_var = True
# Whether to use a noisy version of the acquisition function.
config.noisy_acquisition = False
# Initial interventions used to generate the interventional data.
config.intervention_variables = (
('A',), ('D',), ('E',), ('A', 'D'), ('A', 'E'), ('D', 'E')
) # Intervention variables.
config.intervention_levels = (
(0.,), (1.,), (1.,), (0., 1.), (0., 1.), (1., 1.)) # Intervention values.
config.constraints = ml_collections.ConfigDict()
config.constraints.variables = ('C', 'D', 'E')
config.constraints.lambdas = (10., 10., 10.) # Constraint values
# Exploration sets
config.exploration_sets = (('A',), ('D',), ('E',), ('A', 'D'), ('A', 'E'),
('D', 'E'))
# Whether to add an RBF kernel to the Monte Carlo one.
config.add_rbf_kernel = False
# Whether to update the SCM at every iteration for G-MTGP
config.update_scm = False
# Use hp_prior in kernel
config.use_hp_prior = True
# Number of samples for the kernel computation
config.n_kernel_samples = 10
# Specify which model to run with possible values:
# "cbo", "ccbo_single_task", "ccbo_single_task_causal_prior",
# "ccbo_multi_task", "ccbo_multi_task_causal_prior", "ccbo_dag_multi_task"
config.model_name = 'ccbo_single_task'
return config
| ccbo-main | experiments/config_synthetic2.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SCM examples that we run experiments on."""
from __future__ import annotations
import abc
import collections
from typing import Any, Optional, Tuple
import graphviz
from networkx.classes import multidigraph
from networkx.drawing import nx_agraph
import numpy as np
import pygraphviz
from scipy import stats
from ccbo.scm_examples import scm
from ccbo.utils import utilities
class BaseExample(abc.ABC):
"""Abstract class for experiment examples."""
def __init__(self):
self._variables = None
self._constraints = None
@property
def constraints(self) -> Any:
return self._constraints
@property
def variables(self) -> Any:
"""Returns the variables dictionary."""
return self._variables
@abc.abstractproperty # pylint: disable=deprecated-decorator
def scm_funcs(self) -> collections.OrderedDict[str, Any]:
"""Returns the functions of the structural causal model."""
raise NotImplementedError("scm_funcs should be implemented")
@abc.abstractmethod
def structural_causal_model(self, variables: Optional[Tuple[str, ...]],
lambdas: Optional[Tuple[float, ...]]) -> Any:
"""Returns the scm with fncs, variables and constraints."""
raise NotImplementedError("structural_causal_model should be implemented")
class SyntheticExample1(BaseExample):
"""Synthetic example #1 - corresponds to DAG 1(c) in the cCBO paper."""
@property
def scm_funcs(self) -> collections.OrderedDict[str, Any]:
"""Define functions in SCM."""
x = lambda noise, sample: noise
z = lambda noise, sample: np.exp(-sample["X"]) + noise
y = lambda noise, sample: np.cos(sample["Z"]) - np.exp(-sample["Z"] / 20.0 # pylint: disable=g-long-lambda
) + noise
return collections.OrderedDict([("X", x), ("Z", z), ("Y", y)])
def structural_causal_model(self,
variables: Optional[Tuple[str, ...]],
lambdas: Optional[Tuple[float, ...]]) -> Any:
self._variables = {
"X": ["m", [-3, 2]],
"Z": ["m", [-1, 1]],
"Y": ["t"],
}
if variables is not None and lambdas is not None:
self._constraints = {
var: [utilities.Direction.LOWER, val]
for var, val in zip(variables, lambdas)
}
return scm.Scm(
constraints=self.constraints,
scm_funcs=self.scm_funcs,
variables=self.variables)
class SyntheticExample2(BaseExample):
"""Synthetic example #2 - corresponds to DAG 1(d) in the cCBO paper."""
@property
def scm_funcs(self) -> collections.OrderedDict[str, Any]:
"""Define functions in SCM."""
a = lambda noise, sample: noise
b = lambda noise, sample: noise
c = lambda noise, sample: np.exp(-sample["A"]) / 5. + noise
d = lambda noise, sample: np.cos(sample["B"]) + sample["C"] / 10. + noise
e = lambda noise, sample: np.exp(-sample["C"]) / 10. + noise
y = lambda noise, sample: np.cos(sample["D"]) - sample["D"] / 5. + np.sin( # pylint: disable=g-long-lambda
sample["E"]) - sample["E"] / 4. + noise
return collections.OrderedDict([("A", a), ("B", b), ("C", c), ("D", d),
("E", e), ("Y", y)])
def graph(self) -> multidigraph.MultiDiGraph:
"""Define causal graph structure."""
ranking = []
nodes = ["A", "B", "C", "D", "E", "Y"]
myedges = ["A -> C; C -> E; B -> D; D -> Y; C -> D; E -> Y"]
ranking.append("{{ rank=same; {} }} ".format(" ".join(nodes)))
ranking = "".join(ranking)
edges = "".join(myedges)
graph = "digraph {{ rankdir=LR; {} {} }}".format(edges, ranking)
dag = nx_agraph.from_agraph(
pygraphviz.AGraph(graphviz.Source(graph).source))
return dag
def structural_causal_model(self,
variables: Optional[Tuple[str, ...]],
lambdas: Optional[Tuple[float, ...]]) -> Any:
self._variables = {
"A": ["m", [-5, 5]],
"B": ["nm", [-4, 4]],
"C": ["nm", [0, 10]],
"D": ["m", [-1, 1]],
"E": ["m", [-1, 1]],
"Y": ["t"],
}
if variables is not None and lambdas is not None:
self._constraints = {
var: [utilities.Direction.LOWER, val]
for var, val in zip(variables, lambdas)
}
return scm.Scm(
constraints=self.constraints,
scm_funcs=self.scm_funcs,
variables=self.variables,
graph=self.graph())
class HealthExample(BaseExample):
"""Real example #1 - corresponds to Fig 1(a) in the cCBO paper."""
@property
def scm_funcs(self) -> collections.OrderedDict[str, Any]:
"""Define equations in SCM."""
a = lambda noise, sample: np.random.uniform(low=55, high=75) # age
# bmr - base metabolic rate
b = lambda noise, sample: stats.truncnorm.rvs(-1, 2) * 10 + 1500.
c = lambda noise, sample: np.random.uniform(low=-100, high=100) # calories
# height
d = lambda noise, sample: stats.truncnorm.rvs(-0.5, 0.5) * 10 + 175.
e = lambda noise, sample: (sample["B"] + 6.8 * sample["A"] - 5 * sample["D"] # pylint: disable=g-long-lambda
) / 13.7 + sample["C"] * 150. / 7716. # weight
f = lambda noise, sample: sample["E"] / ((sample["D"] / 100)**2) # bmi
g = lambda noise, sample: np.random.uniform(low=0, high=1) # statin
h = lambda noise, sample: utilities.sigmoid(-8.0 + 0.10 * sample["A"] + 0.03 # pylint: disable=g-long-lambda
* sample["F"]) # aspirin
i = lambda noise, sample: utilities.sigmoid(2.2 - 0.05 * sample[ # pylint: disable=g-long-lambda
"A"] + 0.01 * sample["F"] - 0.04 * sample["G"] + 0.02 * sample["H"]
) # cancer
y = lambda noise, sample: np.random.normal( # pylint: disable=g-long-lambda
6.8 + 0.04 * sample["A"] - 0.15 * sample["F"] - 0.60 * sample["G"] +
0.55 * sample["H"] + 1.00 * sample["I"], 0.4) # psa
return collections.OrderedDict([("A", a), ("B", b), ("C", c), ("D", d),
("E", e), ("F", f), ("G", g), ("H", h),
("I", i), ("Y", y)])
def graph(self) -> multidigraph.MultiDiGraph:
"""Define causal graph structure."""
ranking = []
nodes = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "Y"]
myedges = [
"A -> F; A -> G; A -> I; A -> H; A -> E; B -> E; C -> E; D -> E; D ->" # pylint: disable=implicit-str-concat
" F; E -> F; F -> H; F -> I; G -> I; G -> Y; H -> Y; H -> I; I -> Y"
]
ranking.append("{{ rank=same; {} }} ".format(" ".join(nodes)))
ranking = "".join(ranking)
edges = "".join(myedges)
graph = "digraph {{ rankdir=LR; {} {} }}".format(edges, ranking)
dag = nx_agraph.from_agraph(
pygraphviz.AGraph(graphviz.Source(graph).source))
return dag
def structural_causal_model(self,
variables: Optional[Tuple[str, ...]],
lambdas: Optional[Tuple[float, ...]]) -> Any:
self._variables = {
"A": ["nm", [55, 75]],
"B": ["nm", [1450, 1550]],
"C": ["m", [-400, +400]],
"D": ["nm", [169, 180]],
"E": ["nm", [68, 86]],
"F": ["nm", [19, 25]],
"G": ["m", [0, 1]],
"H": ["m", [0, 1]],
"I": ["nm", [0.2, 0.5]],
"Y": ["t"],
}
if variables is not None and lambdas is not None:
self._constraints = {
var: [utilities.Direction.LOWER, val]
for var, val in zip(variables, lambdas)
}
return scm.Scm(
constraints=self.constraints,
scm_funcs=self.scm_funcs,
variables=self.variables,
graph=self.graph())
EXAMPLES_DICT = {
"synthetic1": SyntheticExample1,
"synthetic2": SyntheticExample2,
"health": HealthExample,
}
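# Example usage (illustrative sketch, mirroring the unit tests): instantiate a
# synthetic example and build its constrained SCM; the constraint variables and
# thresholds below are arbitrary.
#
#   example = EXAMPLES_DICT["synthetic1"]()
#   scm = example.structural_causal_model(variables=("X", "Z"), lambdas=(1., 2.))
#   graph, scm_funcs = scm.graph, scm.scm_funcs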
| ccbo-main | experiments/data.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for Health experiment."""
import ml_collections
def get_config():
"""Return the default configuration for the healthcare example."""
config = ml_collections.ConfigDict()
# Name associated with this SCM
config.example_name = 'health'
config.n_trials = 100 # Number of trials to run.
config.n_samples_obs = 100 # Number of initial observational data points.
# Number of samples per interventional distribution.
config.n_samples_per_intervention = 10
  # Number of samples used to compute the ground truth function.
config.n_samples_ground_truth = 10
# Seed to use to sample the anchor points.
config.seed_anchor_points = 1
# Use a regular grid of points to evaluate the acquisition function
# or sample points uniformly.
config.sample_anchor_points = False
# Number of points on a regular grid to evaluate the acquisition function.
config.n_grid_points = 10
  # Learn or fix the likelihood noise in the GP model.
config.fix_likelihood_noise_var = True
  # Whether to use a noisy evaluation of the acquisition function.
config.noisy_acquisition = False
config.intervention_variables = (('C',), ('C', 'G'), ('C', 'H'),
('C', 'G', 'H')) # Intervention variables.
config.intervention_levels = ((-400.,), (-400., 0.5), (-400., 0.5),
(-400, 0.5, 0.5)) # Intervention values.
config.constraints = ml_collections.ConfigDict()
config.constraints.variables = ('F',)
config.constraints.lambdas = (25.,) # Constraint values.
# Exploration sets
config.exploration_sets = (('C',), ('C', 'G'), ('C', 'H'), ('C', 'G', 'H'))
# Sum the RBF kernel to the Monte Carlo one
config.add_rbf_kernel = False
  # Whether to update the SCM at every iteration for G-MTGP
config.update_scm = False
# Use hp_prior in kernel
config.use_hp_prior = False
# Number of samples for the kernel computation
config.n_kernel_samples = 50
# Specify which model to run with possible values:
# "cbo", "ccbo_single_task", "ccbo_single_task_causal_prior",
# "ccbo_multi_task", "ccbo_multi_task_causal_prior", "ccbo_dag_multi_task"
config.model_name = 'ccbo_single_task'
return config
| ccbo-main | experiments/config_health.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plotting utilities."""
from typing import Any, Dict, List, Optional, Tuple
from emukit.model_wrappers import gpy_model_wrappers
from matplotlib import pyplot as plt
import numpy as np
from ccbo.utils import utilities
def plot_acquisition(inputs: np.ndarray, improvement: np.ndarray,
x_new: np.ndarray,
probability_feasibility: Optional[np.ndarray] = None,
multi_task_model: bool = False) -> None:
"""Plot the acquisition function."""
# Plot expected improvement
plt.plot(inputs, improvement, label='EI')
  # Plot probability_feasibility if this is not None
if probability_feasibility is not None:
if not isinstance(probability_feasibility, float) and not multi_task_model:
# If probability of feasibility is one everywhere do not plot it
if int(np.sum(probability_feasibility)) != inputs.shape[0]:
plt.plot(inputs, probability_feasibility, label='PF')
plt.plot(inputs, improvement * probability_feasibility, label='cEI')
# Plot new selected point
plt.axvline(
x=x_new, color='red', linestyle='-', label='new point is:' + str(x_new))
plt.legend()
plt.show()
def plot_outcome(
n: int,
outcomes: List[Any],
labels: List[str],
title: Optional[str] = None,
true_objective_values: Optional[List[float]] = None) -> None:
"""Plot convergence results."""
_, ax = plt.subplots(1, figsize=(6, 6), sharex=True)
for ii, out in enumerate(outcomes):
ax.plot(out, lw=2, label=labels[ii], alpha=0.5)
if true_objective_values:
ax.hlines(
true_objective_values,
0,
n,
'red',
ls='--',
lw=1,
alpha=0.7,
label='Ground truth')
ax.set_ylabel(r'$y^*$')
ax.grid(True)
ax.legend(
ncol=3,
fontsize='medium',
loc='center',
frameon=False,
bbox_to_anchor=(0.5, 1.2))
ax.set_xlabel(r'Trials')
ax.set_xlim(0, n)
if title:
plt.title(title)
plt.subplots_adjust(hspace=0)
plt.show()
def plot_save_outcome(
n: float,
outcomes: List[Any],
labels: List[str],
true_objective_values: Optional[List[float]] = None,
) -> None:
"""Plot convergence results."""
_, ax = plt.subplots(1, figsize=(6, 6), sharex=True)
j = 0
for ii, out in enumerate(outcomes):
ax.plot(out[j][1:], lw=2, label=labels[ii], alpha=0.5)
if true_objective_values:
ax.hlines(
true_objective_values[j],
0,
n,
'red',
ls='--',
lw=1,
alpha=0.7,
label='Ground truth')
ax.set_ylabel(r'$y^*_{}$'.format(j))
ax.grid(True)
ax.legend(
ncol=3,
fontsize='medium',
loc='center',
frameon=False,
bbox_to_anchor=(0.5, 1.2))
ax.set_xlabel(r'Trials')
ax.set_xlim(0, n - 2)
plt.subplots_adjust(hspace=0)
plt.close()
def plot_models(
model: Any, # Can be dict for constraints or bo model for target
exploration_sets: List[Tuple[str, ...]],
ground_truth: Any,
interventional_grids: Dict[Tuple[str, ...], np.ndarray],
interventional_data_x: Dict[Tuple[str, ...], Any],
interventional_data_y: Dict[Tuple[str, ...], Any],
multi_task_model: bool = False) -> None:
"""Plots a set models."""
for es in exploration_sets:
    # Only plot if the input space is one dimensional
if len(es) == 1:
inputs = np.asarray(interventional_grids[es])
if isinstance(model[es], dict):
# We are plotting the constraints
for i, p in enumerate(list(model[es].keys())):
true_vals = ground_truth[es][p]
plot_single_model(inputs, i + 1, model[es][p], multi_task_model,
true_vals, interventional_data_x[es],
interventional_data_y[es][p])
else:
# We are plotting the target
true_vals = utilities.make_column_shape_2d(ground_truth[es])
plot_single_model(inputs, 0, model[es], multi_task_model, true_vals,
interventional_data_x[es], interventional_data_y[es])
def plot_single_model(inputs: np.ndarray, task: int,
single_model: gpy_model_wrappers.GPyModelWrapper,
multi_task_model: bool, ground_truth: np.ndarray,
data_x: np.ndarray, data_y: np.ndarray) -> None:
"""Plots a single model."""
if single_model is not None:
if multi_task_model:
# The constraint functions correspond to the indices 0-p where
# p is the total number of tasks in a multi-task model. In order to
# predict the inputs need to be augmented with the task index.
inputs = np.concatenate([inputs, task * np.ones((inputs.shape[0], 1))],
axis=1)
mean, var = single_model.predict(inputs)
plt.scatter(data_x, data_y)
# GP variance
plt.fill_between(
inputs[:, 0], (mean - var)[:, 0], (mean + var)[:, 0], alpha=0.2)
# GP mean
plt.plot(inputs[:, 0], mean, 'b', label='posterior mean')
# True function
plt.plot(inputs[:, 0], ground_truth, 'r', label='True')
plt.legend()
plt.show()
| ccbo-main | utils/plotting_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute constraints related quantities."""
from __future__ import annotations
import collections
import copy
import operator
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union
import networkx
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import intervention_functions
from ccbo.utils import utilities
EVAL_CONSTRAINT_OP = {
utilities.Direction.LOWER: operator.lt,
utilities.Direction.HIGHER: operator.gt
}
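# For example, a LOWER-direction constraint on a variable is satisfied when its
# value falls strictly below the threshold:
#   EVAL_CONSTRAINT_OP[utilities.Direction.LOWER](value, threshold)  # value < threshold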
def best_feasible_initial_es(initial_feasibility: Dict[Any, Any],
best_es: Tuple[str, ...], best_level: Any,
interventional_data_y: Dict[Tuple[str, ...], Any],
interventional_data_x: Dict[Tuple[str, ...], Any],
task: Any) -> Tuple[Any, Any]:
"""Check if initial best es if feasible and compute otherwise."""
feasible_best_es = best_es
feasible_best_level = best_level
feasible_sets = {}
for key, value in initial_feasibility.items():
if list(value.values()):
      # There exist constraints for this key, thus we check whether there is
      # at least one feasible initial value
feasible_sets[key] = np.any(list(value.values()))
else:
# There are no constraints for this key thus the set is feasible
feasible_sets[key] = True
# Check if best_es is feasible. If yes return it otherwise recompute the
# best feasible set by filtering out sets that are not feasible and computing
# the task (min or max) among the filtered values
if not feasible_sets[best_es]:
feasible_interventional_data_y = {
key: task(interventional_data_y[key])
for key, val in feasible_sets.items()
if val
}
# If the filtered dict is not empty we select the set among the keys
# otherwise we return the initial best_es
if feasible_interventional_data_y:
feasible_best_es = task(
feasible_interventional_data_y,
key=feasible_interventional_data_y.get)
feasible_best_level = interventional_data_x[feasible_best_es]
return feasible_best_es, feasible_best_level
def initialise_constraints_interventional_objects(
exploration_sets: Sequence[Set[str]],
intervention_samples: Dict[Tuple[str, ...], Dict[str, Any]],
interventional_data_y_constraints: Dict[Any, Any],
bo_model_constraints: Dict[Any, Any], initial_feasibility: Dict[Any, Any],
constraints: Dict[str, List[Any]], feasibility: Dict[Any, Any]) -> None:
"""Initialise interventional data for the constraints."""
assert isinstance(intervention_samples, dict)
for es in exploration_sets:
if es not in intervention_samples:
# No interventional data
pass
else:
# Interventional data contains a dictionary of dictionaries
# each corresponding to one type (es) of intervention.
# es on keys and nd.array on values
data_subset = intervention_samples[es]
for var in list(bo_model_constraints[es].keys()):
# Removing the temporal index
value = data_subset[var].reshape(-1, 1)
interventional_data_y_constraints[es][var] = value
# Check if the point is feasible or not
initial_feasibility[es][var] = EVAL_CONSTRAINT_OP[
constraints[var][0]](value, constraints[var][1])
feasibility[es] = [int(all(list(initial_feasibility[es].values())))]
def get_constraints_dicts(
exploration_sets: List[Tuple[str, ...]],
constraints: Dict[str, List[float]],
graph: multidigraph.MultiDiGraph,
target_variable: str,
observational_samples: Dict[str, np.ndarray]
) -> Tuple[Dict[Any, Dict[str, Union[int, Any]]], Dict[Any, Any], Dict[
Any, Any], Dict[Any, Any], Dict[Any, Any]]:
"""Initialise dictionaries of the constraints."""
bo_model_constraints = {es: None for es in exploration_sets}
initial_feasibility = {es: None for es in exploration_sets}
# Initialize object to store the feasibility of the interventional points
feasibility = {es: None for es in exploration_sets}
constraints_dict = {}
protected_variables = list(constraints.keys())
for es in exploration_sets:
    # For every es, select the variables appearing in the constraints
# Note that every es has a different number of constraints and vars
for var in es:
if var in protected_variables:
# Get C(X)
protected_variables.remove(var)
descendants_vars = list(set().union(*[
list(networkx.descendants(graph, element_es))
for element_es in list(es)
]))
descendants_vars.remove(target_variable)
# The relevant constraints for X are denoted by c_x. This includes the
# protected variables that are also descendant of X and are not themselves
# included in es.
c_x = [var for var in protected_variables if var in descendants_vars]
    # Store the constrained variables and their number
constraints_dict[es] = {
'num': len(c_x),
'vars': c_x
}
# Initialize models for the constrained
bo_model_constraints[es] = {var: None for var in c_x}
# Initialise feasibility stores the feasibility of the initial
# interventional data if provided. It is initialized to 0.
initial_feasibility[es] = {var: 0 for var in c_x}
# Check that threshold value is not None or assign a value using d_o
if es in constraints:
if constraints[es][1] is None: # check the threshold
assert observational_samples is not None, (
'Specify threshold values or provide D_O')
constraints[es][1] = np.mean(observational_samples[es][:, 0])
interventional_data_y_constraints = copy.deepcopy(bo_model_constraints)
return (constraints_dict, initial_feasibility, bo_model_constraints,
feasibility, interventional_data_y_constraints)
def compute_constraints_functions(
exploration_sets: Optional[Any],
constraints_dict: Dict[str, Dict[str, Any]],
interventional_grids: Dict[Any, Any],
scm_funcs: collections.OrderedDict[str, Any],
graph: multidigraph.MultiDiGraph,
dict_variables: Dict[str, List[Any]],
sampling_seed: int,
n_samples: int = 1) -> Dict[str, Dict[str, List[float]]]:
"""Compute ground truth functions for the constraints."""
constraints_values_dict = {}
for es in exploration_sets:
constraints_values_dict[es] = {}
for j in range(constraints_dict[es]['num']):
c_target = constraints_dict[es]['vars'][j]
_, _, _, _, _, ce_constraints = (
intervention_functions.get_optimal_interventions(
graph=graph,
exploration_sets=[es],
interventional_grids=interventional_grids,
scm_funcs=scm_funcs,
model_variables=list(dict_variables.keys()),
target_variable=c_target,
n_samples=n_samples,
sampling_seed=sampling_seed,
)
)
constraints_values_dict[es][c_target] = ce_constraints[es]
return constraints_values_dict
def get_constraints_dict(
exploration_sets: Optional[Any],
protected_variables: List[str],
target_variable: str,
graph: multidigraph.MultiDiGraph) -> Dict[str, Dict[str, Any]]:
"""Get number and constrained variables for each intervention."""
constraints_dict = {}
for es in exploration_sets:
# For every es, select the variables appearing in the constraints
# Note that every es has a different number of constraints and vars
for var in es:
if var in protected_variables:
# Get P(X)
protected_variables.remove(var)
descendants_vars = list(set().union(*[
list(networkx.descendants(graph, element_es))
for element_es in list(es)
]))
descendants_vars.remove(target_variable)
# c_x are the variables that are constrained for the intervention set X
c_x = [value for value in protected_variables if value in descendants_vars]
    # Store the constrained variables and their number
constraints_dict[es] = {
'num': len(c_x),
'vars': c_x
}
return constraints_dict
def verify_feasibility(
exploration_sets: List[Tuple[str, ...]],
all_ce: Dict[Tuple[str, ...], List[Any]],
constraints: Dict[str, List[Any]], constraints_dict: Dict[str,
Dict[str, Any]],
scm_funcs: collections.OrderedDict[str, Any],
graph: multidigraph.MultiDiGraph,
dict_variables: Dict[str, List[Any]],
interventional_grids: Dict[Tuple[str, ...], Optional[np.ndarray]],
sampling_seed: int,
optimal_level: Optional[np.ndarray] = None,
optimal_unconstrained_set: Optional[Tuple[str, ...]] = None,
n_samples: int = 1,
task: utilities.Task = utilities.Task.MIN
) -> Tuple[bool, Tuple[str, ...], Any, Any]:
"""Verify feasibility and get constrained solution."""
# Optimal unconstrained solution
if optimal_unconstrained_set is None:
optimal_unconstrained_set = exploration_sets[utilities.ARG_EVAL_FN[task](([
utilities.EVAL_FN[task](all_ce[var])
for var in exploration_sets
if all_ce[var]
]))]
if optimal_level is None:
optimal_level = interventional_grids[optimal_unconstrained_set][
utilities.ARG_EVAL_FN[task](all_ce[optimal_unconstrained_set])]
optimal_y = utilities.EVAL_FN[task](all_ce[optimal_unconstrained_set])
feasibility_list = []
if constraints_dict[optimal_unconstrained_set]['num'] == 0:
# No constraints
feasibility_list.append(1)
else:
# Get value for the constraints
for p in constraints_dict[optimal_unconstrained_set]['vars']:
(_, _, _, _, _,
constrain_values) = intervention_functions.get_optimal_interventions(
exploration_sets=[optimal_unconstrained_set],
interventional_grids={optimal_unconstrained_set: [optimal_level]},
scm_funcs=scm_funcs,
graph=graph,
model_variables=list(dict_variables.keys()),
target_variable=p,
n_samples=n_samples,
sampling_seed=sampling_seed)
      # Check if the constraint is satisfied
tmp = EVAL_CONSTRAINT_OP[constraints[p][0]](
constrain_values[optimal_unconstrained_set][0], constraints[p][1])
feasibility_list.append(tmp)
# Check if all constraints are satisfied
is_feasible = all(feasibility_list)
return (is_feasible, optimal_unconstrained_set, optimal_level, optimal_y)
| ccbo-main | utils/constraints_functions.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sampling utilities."""
from __future__ import annotations
import collections
from typing import Any, Dict, Optional, List
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import graph_functions
def sample_scm(
scm_funcs: collections.OrderedDict[str, Any],
graph: Optional[multidigraph.MultiDiGraph] = None,
interventions: Optional[Dict[str, Any]] = None,
n_samples: int = 10,
compute_moments: bool = True,
moment: Optional[int] = None,
seed: Optional[int] = None) -> Dict[str, Any]:
"""Get samples or moments of samples from a SCM using the true or estimated functions.
When using the estimated SCM functions these are currently fitted using
Gaussian processes.
Args:
scm_funcs : functions in the SCM.
graph : causal graph. If None the true functions in the SCM are used.
interventions : Interventions to be implemented in the SCM. If None this
      function samples from the observational distribution.
n_samples: number of samples to get for each node in the SCM.
compute_moments : Whether to aggregate the samples from the SCM to compute
the moments of the observational or interventional distribution.
      If False the full array of samples is returned.
moment: which moment (0 or 1) to compute given the samples from the SCM.
If moment = 0 this function returns the expected value.
If moment = 1 this function returns the variance.
seed: Seed to use to sample the exogenous variables in the SCM.
Returns:
Samples or moments of samples from the true or estimated distributions
(observational or interventional) associated to the SCM.
"""
if seed is not None:
# This seed is controlling the sampling of the exogenous variables and of
# the estimated functions in the SCM. When this is fixed both sources of
# randomness are fixed.
np.random.seed(seed)
# Dictionary to store the average of the samples.
sample = collections.OrderedDict([(k, []) for k in scm_funcs.keys()])
for _ in range(n_samples):
epsilon = {k: np.random.randn(1) for k in scm_funcs.keys()}
# Dictionary to store one sample.
tmp = collections.OrderedDict([(k, np.zeros(1)) for k in scm_funcs.keys()])
# Loop over the nodes in the DAG and either assign the intervention
# value or sample from the true or estimated functions in the SCM.
for var, function in scm_funcs.items():
if interventions and var in interventions and interventions[
var] is not None:
# Assign the intervened value. Note that if interventions exist they
# take precedence.
tmp[var] = interventions[var]
else:
# If the graph is given this function samples from the estimated
# functions in the SCM. If it is not given the true functions are
# used to sample.
if graph:
# Get the parents of the variable we are sampling. The parents are
# used to get the right function to sample from in the dictionary of
# estimated SCM functions.
parents = graph_functions.get_node_parents(graph, var)
if parents:
# If the variable has parents sample from the estimated function.
tmp[var] = function(parents, var, tmp)
else:
# If the variable does not have parents sample from
# marginal distribution.
tmp[var] = function((None, var))
else:
# Sample from true SCM.
tmp[var] = function(epsilon[var], tmp)
# Store a single sample.
sample[var].append(tmp[var])
# Aggregate the samples if compute moments is True or return the full stacked
# array of samples otherwise.
if compute_moments:
if moment == 0:
# Take the average of the samples for each node.
sample = {k: np.array(np.mean(v)) for k, v in sample.items()}
elif moment == 1:
# Take the variance of the samples for each node.
sample = {k: np.array(np.var(v)) for k, v in sample.items()}
else:
      raise NotImplementedError(f'Moment {moment} not implemented.')
else:
# Stack the full list of samples obtained for each node.
sample = {k: np.vstack(v) for k, v in sample.items()}
return sample
def select_sample(sample: Dict[str, np.ndarray],
input_variables: List[str]) -> np.ndarray:
"""Returns a sample for the set of input variables.
Args:
sample : a sample from the SCM.
input_variables : variables for which we want to get the values in sample.
Returns:
The sampled value(s) for the variable(s) given in input_variables.
"""
if isinstance(input_variables, str):
# input_variables only includes one variable which is given by a string
return sample[input_variables].reshape(-1, 1)
else:
# input_variables includes multiple variables that are given
# by a tuple() or a list()
samp = []
for node in input_variables:
samp.append(sample[node].reshape(-1, 1))
return np.hstack(samp)
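# Example usage (illustrative sketch, mirroring sampling_utils_test.py): draw
# five observational samples from a true SCM and select the values of "X".
# Here `scm_funcs` is assumed to be the OrderedDict of true SCM functions.
#
#   d_o = sample_scm(scm_funcs=scm_funcs, graph=None, n_samples=5,
#                    compute_moments=False, seed=1)
#   x_samples = select_sample(d_o, "X")  # array of shape (5, 1)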
| ccbo-main | utils/sampling_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.utilities."""
import unittest
import numpy as np
from ccbo.experiments import data
from ccbo.utils import sampling_utils
from ccbo.utils import scm_utils
class SamplingUtilsTest(unittest.TestCase):
def test_sample_scm(self):
example = data.EXAMPLES_DICT["synthetic1"]()
scm = example.structural_causal_model(
variables=("X", "Z"), lambdas=(1., 2.))
graph = scm.graph
precision = 5
# Test sampling from true observational distribution
d_o_expected_values = {
"X":
np.array([[1.62435], [-1.07297], [1.74481], [-0.24937],
[-0.32241]]),
"Z":
np.array([[-0.41472], [3.78945], [-0.58653], [2.74533], [0.99641]]),
"Y":
np.array([[-0.63389], [-3.92631], [0.12215], [-3.85439], [0.72569]])
}
# Sample from the true observational distribution
d_o = sampling_utils.sample_scm(
scm_funcs=scm.scm_funcs,
graph=None,
n_samples=5,
compute_moments=False,
seed=1)
# Test
for key, value in d_o.items():
assert isinstance(value, np.ndarray)
np.testing.assert_array_almost_equal(value, d_o_expected_values[key],
precision)
    # Test sampling from true interventional distribution
d_i_expected_values = {
"X":
np.array([[1.], [1.], [1.], [1.], [1.]]),
"Y":
np.array([[-0.57003], [-2.91060], [0.22282], [-3.22900],
[1.13283]]),
"Z":
np.array([[-0.24388], [1.23329], [-0.39333], [1.82999], [-0.01617]])
}
intervention = {v: None for v in graph.nodes}
intervention_level = np.array(1.)
intervention_var = "X"
intervention[intervention_var] = intervention_level
# Sample from the true interventional distribution
d_i = sampling_utils.sample_scm(
scm_funcs=scm.scm_funcs,
graph=None,
interventions=intervention,
n_samples=5,
compute_moments=False,
seed=1)
# Test
for val in d_i[intervention_var]:
self.assertEqual(val, intervention_level)
for var in ["Z", "Y"]:
np.testing.assert_array_almost_equal(d_i[var], d_i_expected_values[var],
precision)
# Test sampling from estimated interventional distribution
d_i_estimated_expected_values = {
"X":
np.array([[1.], [1.], [1.], [1.], [1.]]),
"Y":
np.array([[-0.45850], [0.03227], [-0.33184], [-0.02329],
[-1.01595]]),
"Z":
np.array([[-0.18379], [0.37207], [-0.52341], [0.71496], [-0.52929]])
}
# Sample from the estimated interventional distribution given the fitted
# SCM functions
fitted_scm_fncs = scm_utils.fit_scm_fncs(graph, d_o,
scm.scm_funcs,
1)
fitted_scm = scm_utils.build_fitted_scm(graph, fitted_scm_fncs,
scm.scm_funcs)
d_i_estimated = sampling_utils.sample_scm(
scm_funcs=fitted_scm().functions(),
graph=graph,
interventions=intervention,
n_samples=5,
compute_moments=False,
seed=1)
# Test
for var in ["Z", "Y"]:
np.testing.assert_array_almost_equal(d_i_estimated[var],
d_i_estimated_expected_values[var],
precision)
def test_select_sample(self):
values = {
"X": np.array([[1.], [2], [3.]]),
"Y": np.array([[4.], [5], [6.]]),
"Z": np.array([[7.], [8], [9.]]),
}
input_variables_list = ["X", ["Z", "Y"]]
expected_values = [
np.array([[1.], [2], [3.]]),
np.array([[7., 4.], [8., 5.], [9., 6.]])
]
for var, value in zip(input_variables_list, expected_values):
res = sampling_utils.select_sample(values, var)
np.testing.assert_array_equal(res, value)
if __name__ == "__main__":
unittest.main()
| ccbo-main | utils/sampling_utils_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian process utils."""
from __future__ import annotations
import collections
import functools
import itertools
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Set, Tuple
from GPy import core
from GPy import kern
from GPy import likelihoods
from GPy.models import gp_regression
from GPy.util import multioutput
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import initialisation_utils
from ccbo.utils import sampling_utils
from ccbo.utils import utilities
class GPCausalCoregionalizedRegression(core.GP):
"""Gaussian Process model for causal multioutput regression.
  Compared to the built-in GPy implementation, this class lets the user specify
  a mean function that can vary across tasks, that is across causal effects.
"""
def __init__(self,
x_list: List[float],
y_list: List[float],
kernel: kern.Kern,
likelihoods_list: Optional[List[likelihoods.Likelihood]] = None,
mean_function: Optional[Mapping[np.ndarray, np.ndarray]] = None,
name: str = 'GP_CausalCR'):
# Define inputs and outputs
x, y, self.output_index = multioutput.build_XY(x_list, y_list)
# Define likelihood for each task
likelihood = multioutput.build_likelihood(y_list, self.output_index,
likelihoods_list)
# Initialize model
super().__init__(
x,
y,
kernel,
likelihood,
mean_function=mean_function,
Y_metadata={'output_index': self.output_index})
def mean_function_multitask_model(
total_mean_list: List[Callable[[Any], np.ndarray]]
) -> Callable[[np.ndarray], np.ndarray]:
"""Computes the mean functions for a multi-task model ."""
def mean_function_multitask_internal(values: np.ndarray) -> np.ndarray:
# The argument values gives the input values at which to compute the mean
# functions. Here the first dimension gives the x value whereas the second
# gives the function index and therefore which mapping to use out of the
# total_mean_list
out = []
for i in range(values.shape[0]):
single_value = values[i, :]
# Get the index of the function. This is the last dimension of the inputs.
index = int(single_value[-1])
# Compute the mean function corresponding to the index at the input value
# which is given in the D-1 rows of single_value
res = total_mean_list[index]([single_value[:-1]])
out.append(res)
return np.vstack(out)
return mean_function_multitask_internal
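# Illustrative sketch (hypothetical per-task means): the last input column is
# interpreted as the task index and routes each row to the matching callable.
#
#   mean_fn = mean_function_multitask_model(
#       [lambda v: np.zeros((1, 1)), lambda v: np.ones((1, 1))])
#   mean_fn(np.array([[0.5, 0.], [0.5, 1.]]))  # returns [[0.], [1.]]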
def get_causal_effect_by_sampling(
values: np.ndarray,
y: str,
x: Tuple[str, ...],
graph: multidigraph.MultiDiGraph,
fitted_scm: Callable[[], Any],
true_scm_funcs: collections.OrderedDict[str, Any],
dict_store: Dict[Tuple[str, ...], Dict[str, Any]],
seed: Optional[int] = None,
moment: int = 0,
n_samples: int = 10,
compute_moments: bool = True,
use_true_scm: bool = False) -> np.ndarray:
"""Get mean or variance of the causal effect by sampling with a given seed."""
interventions = initialisation_utils.make_intervention_dict(graph)
out = []
for xval in values:
# Check if we have a nested dictionary. This is needed to distinguish
# between constrained and unconstrained settings. In unconstrained settings
# dict_store is {v: {'xval': }}. In constrained it is {v: {c: {'xval': }}}
# thus a nested dict. Notice that we might also have empty inner dict thus
# we also need to check the len of the resulting values list.
# if len(list(dict_store[x].values())) and isinstance(
# list(dict_store[x].values())[0], dict):
if list(dict_store[x].values()) and isinstance(
list(dict_store[x].values())[0], dict):
# Computing var for the constraints
stored_values = dict_store[x][y]
else:
# Computing var for the target
stored_values = dict_store[x]
# Check if the var value for xval has already been computed
if str(xval) in stored_values:
# If we have stored a list of samples and we want to compute the moments
# in this function we need to take the average of the samples
if isinstance(stored_values[str(xval)], list) and compute_moments:
out.append(np.mean(stored_values[str(xval)]))
else:
out.append(stored_values[str(xval)])
else:
# Otherwise compute it and store it
for intervention_variable, xx in zip(x, xval):
interventions[intervention_variable] = xx
get_samples = functools.partial(
sampling_utils.sample_scm,
interventions=interventions,
n_samples=n_samples,
compute_moments=compute_moments,
moment=moment,
seed=seed)
if use_true_scm:
# Sample from the true interventional distribution
sample = get_samples(scm_funcs=true_scm_funcs, graph=None)
else:
# Sample from the estimated interventional distribution
sample = get_samples(
scm_funcs=fitted_scm().functions(),
graph=graph)
out.append(sample[y])
stored_values[str(xval)] = sample[y]
return np.vstack(out)
def get_product_mean_functions(
graph: multidigraph.MultiDiGraph,
target_variable: str,
target_list: List[str],
exploration_set: Set[str],
fitted_scm: Callable[[], Any],
true_scm_funcs: Callable[[], Any],
mean_dict_store: Dict[Tuple[str, ...], Dict[str, Any]],
mean_constraints_dict_store: Dict[
Tuple[str, ...], Dict[str, Dict[str, Any]]
],
seeds: List[int],
n_samples: int = 10,
compute_moments: bool = False,
use_true_scm: bool = False,
) -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
"""Wrapper around mean_function_internal to compute product of mean funcs."""
def product_mean_function(x: np.ndarray,
x2: Optional[np.ndarray]) -> np.ndarray:
mean_func = functools.partial(
get_causal_effect_by_sampling,
x=exploration_set,
graph=graph,
fitted_scm=fitted_scm,
true_scm_funcs=true_scm_funcs,
n_samples=n_samples,
compute_moments=compute_moments,
use_true_scm=use_true_scm)
get_stored_values = functools.partial(
utilities.get_stored_values,
target_variable=target_variable,
mean_dict_store=mean_dict_store,
mean_constraints_dict_store=mean_constraints_dict_store)
    # If x2 is not None, we need to compute the full covariance matrix across
# points in x and in x2. The values to consider are given by all
# couples where the first value is in x and the second is in x2. If instead
# x2 is None we want to compute the diagonal of the correlation matrix which
# is given by iterating over the couples of points of x with itself.
if x2 is not None:
values_to_compute = itertools.product(x, x2)
else:
values_to_compute = zip(x, x)
out = []
for xval, x2val in list(values_to_compute):
target_1 = target_list[int(xval[-1])]
target_2 = target_list[int(x2val[-1])]
mean_1 = mean_func(
values=[xval[:-1]],
dict_store=get_stored_values(target=target_1),
y=target_1,
seed=seeds[0])
mean_2 = mean_func(
values=[x2val[:-1]],
dict_store=get_stored_values(target=target_2),
y=target_2,
seed=seeds[1])
      # We need to squeeze the product as the function
# get_causal_effect_by_sampling returns a 3d tensor where the second and
# third dimensions are one - with this we only keep a 1d vector
product = np.squeeze(mean_1 * mean_2)
if not compute_moments:
# Average is NOT done in get_causal_effect_by_sampling
# as we need to get the product before averaging the samples
out.append(np.mean(product))
else:
# Average is already done in get_causal_effect_by_sampling
# thus there is not need to do it here
out.append(product)
if x2 is not None:
# Computing the full covariance matrix
res = np.reshape(out, (x.shape[0], x2.shape[0]))
else:
# Computing the diagonal terms which gives a vector
res = np.vstack(out)
return res
return product_mean_function
def update_sufficient_statistics_hat(
graph: multidigraph.MultiDiGraph,
y: str,
x: Tuple[str, ...],
fitted_scm: Callable[[], Any],
true_scm_funcs: collections.OrderedDict[str, Any],
mean_dict_store: Dict[Tuple[str, ...], Any],
var_dict_store: Dict[Tuple[str, ...], Any],
seed: Optional[int] = None,
n_samples: int = 10,
use_true_scm: bool = False,
) -> Tuple[Callable[[np.ndarray], Any], Callable[[np.ndarray], Any]]:
"""Updates the mean and variance functions (priors) on the causal effects."""
# Initialize the function to compute the mean and variance of the causal
# effects by sampling from the estimated SCM.
mean_var_function = functools.partial(
get_causal_effect_by_sampling,
y=y,
x=x,
graph=graph,
fitted_scm=fitted_scm,
true_scm_funcs=true_scm_funcs,
n_samples=n_samples,
seed=seed,
use_true_scm=use_true_scm)
# moment=0 is the default thus we only need to pass the values at which to
# compute the mean and the dict to stored the computed values
def mean_function(values: np.ndarray) -> np.ndarray:
return mean_var_function(values=values, dict_store=mean_dict_store)
# To compute the variance of the samples we need to set moment=1 and provide
# the relevant dict where to store the values
def variance_function(values: np.ndarray) -> np.ndarray:
return mean_var_function(values=values, moment=1, dict_store=var_dict_store)
return mean_function, variance_function
def fit_gp(
x: np.ndarray,
y: np.ndarray,
lengthscale: float = 1.0,
variance: float = 1.0,
noise_var: float = 1.0,
ard: bool = False,
n_restart: int = 10,
seed: int = 0,
):
"""Fits a Gaussian process."""
# The random seed ensures that given the same data the optimization
# of the GP model leads to the same optimized hyper-parameters.
np.random.seed(seed)
kernel = kern.RBF(
x.shape[1], ARD=ard, lengthscale=lengthscale, variance=variance)
model = gp_regression.GPRegression(
X=x, Y=y, kernel=kernel, noise_var=noise_var)
model.optimize_restarts(n_restart, verbose=False, robust=True)
return model
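# Example usage (minimal sketch with toy data): fit a GP to noisy observations
# of a sine function and query its posterior mean and variance.
#
#   x = np.linspace(0, 1, 10)[:, None]
#   y = np.sin(x) + 0.1 * np.random.randn(10, 1)
#   model = fit_gp(x, y, n_restart=2)
#   mean, var = model.predict(x)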
def get_lenghscale_hp(all_vars: Sequence[str],
interventional_limits: Dict[str, Sequence[float]],
ratio_factor: float = 2.) -> float:
"""Get hyperparameter for the lenghscale of the RBF kernel of the GP model."""
dist = 0.
# If all_vars only include one variable transform this into a tuple
all_vars = (all_vars,) if isinstance(all_vars, str) else all_vars
for var in list(all_vars):
limits = interventional_limits[var]
dist += np.linalg.norm(limits[0] - limits[1])
prior_lenghscale = (dist/len(all_vars))/ratio_factor
return prior_lenghscale
| ccbo-main | utils/gp_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph functions."""
import itertools
from typing import Any, Dict, List, Optional, Tuple, Union
import graphviz
from networkx.algorithms import dag
from networkx.classes import multidigraph
def get_sorted_nodes(graph: multidigraph.MultiDiGraph) -> Dict[str, int]:
"""Returns dict of nodes in topological order."""
sorted_nodes = {val: ix for ix, val in enumerate(dag.topological_sort(graph))}
return sorted_nodes
def get_node_parents(
graph: multidigraph.MultiDiGraph, node: str
) -> Tuple[str, ...]:
"""Returns the parents of the given node."""
assert node in graph.nodes()
# The fitted SCM functions expect inputs in a specific order
# (the topological order). Hence the additional sorting.
sorted_nodes = get_sorted_nodes(graph)
return tuple(sorted(graph.predecessors(node), key=sorted_nodes.get))
def get_all_parents(graph: multidigraph.MultiDiGraph) -> Dict[str, Any]:
"""Get the parents for each node in the graph."""
parents = {}
for var in list(graph.nodes):
parents[var] = list(get_node_parents(graph, var))
return parents
def make_graphical_model(
topology: str,
nodes: List[str],
target_node: Optional[str] = None,
verbose: bool = False) -> Union[multidigraph.MultiDiGraph, str]:
"""Generic Bayesian network.
Args:
topology: Choice of independent and dependent causal topology
nodes: List containing the nodes of the CGM
e.g. nodes=['X', 'Z', 'Y']
    target_node: If we are using an independent spatial topology then we need
      to specify the target node
verbose : Whether to print the graph or not
Returns:
The DOT format of the graph or a networkx object
"""
assert topology in ["dependent", "independent"]
assert nodes
if topology == "independent":
assert target_node is not None
assert isinstance(target_node, str)
spatial_edges = []
ranking = []
# Check if target node is in the list of nodes, and if so remove it
if topology == "independent":
if target_node in nodes:
nodes.remove(target_node)
node_count = len(nodes)
assert target_node not in nodes
connections = node_count * "{} -> {}; "
edge_pairs = list(sum([(item, target_node) for item in nodes], ()))
else:
node_count = len(nodes)
connections = (node_count - 1) * "{} -> {}; "
edge_pairs = []
for pair in list(zip(nodes, nodes[1:])):
for item in pair:
edge_pairs.append(item)
if topology == "independent":
# X --> Y; Z --> Y
all_nodes = nodes + [target_node]
iters = [iter(edge_pairs)]
inserts = list(itertools.chain(map(next, itertools.cycle(iters)), *iters))
spatial_edges.append(connections.format(*inserts))
ranking.append("{{ rank=same; {} }} ".format(" ".join(
[item for item in all_nodes])))
elif topology == "dependent":
# X --> Z; Z --> Y
iters = [iter(edge_pairs)]
inserts = list(itertools.chain(map(next, itertools.cycle(iters)), *iters))
spatial_edges.append(connections.format(*inserts))
ranking.append("{{ rank=same; {} }} ".format(" ".join(
[item for item in nodes])))
else:
raise ValueError("Not a valid spatial topology.")
ranking = "".join(ranking)
spatial_edges = "".join(spatial_edges)
graph = "digraph {{ rankdir=LR; {} {} }}".format(spatial_edges, ranking)
if verbose:
return graphviz.Source(graph)
else:
return graph
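# Example usage (illustrative): build a three-node chain X -> Z -> Y.
#
#   dot_string = make_graphical_model("dependent", nodes=["X", "Z", "Y"])
#   # dot_string encodes the edges "X -> Z; Z -> Y;" in DOT format.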
| ccbo-main | utils/graph_functions.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""utils init."""
| ccbo-main | utils/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.utilities."""
import unittest
import numpy as np
from ccbo.utils import utilities
class UtilitiesTest(unittest.TestCase):
def test_make_column_shape_2d(self):
num_rows = 6
row_data = np.arange(num_rows)
result = utilities.make_column_shape_2d(row_data)
self.assertEqual((num_rows, 1), result.shape)
def test_check_reshape_add_data(self):
intervention_data_x = {
('X',): np.array([[1.], [3.], [5.], [7.], [9.]]),
('Y',): None,
('X', 'Z'): np.ones((5, 2))
}
intervention_data_y = {
('X',): np.array([[2.], [4.], [6.], [8.], [10.]]),
('Y',): None,
('X', 'Z'): np.zeros((5, 1))
}
new_interventional_data_x = np.array([[5., 3.]])
y_new = 11.
# Test appending interventional data to existing data
best_es = ('X', 'Z')
result_x, result_y = utilities.check_reshape_add_data(
intervention_data_x, intervention_data_y, new_interventional_data_x,
y_new, best_es)
expected_x = np.array([[1., 1.], [1., 1.], [1., 1.], [1., 1.], [1., 1.],
[5., 3.]])
expected_y = np.array([[0.], [0.], [0.], [0.], [0.], [11.]])
self.assertTrue(np.equal(expected_x, result_x).all())
self.assertTrue(np.equal(expected_y, result_y).all())
# Test adding new interventional data
best_es = ('Y',)
new_interventional_data_x = np.array([5.])
result_new_x, result_new_y = utilities.check_reshape_add_data(
intervention_data_x, intervention_data_y, new_interventional_data_x,
y_new, best_es)
expected_new_x = np.array([[5.]])
expected_new_y = np.array([[11.]])
self.assertTrue(np.equal(expected_new_x, result_new_x).all())
self.assertTrue(np.equal(expected_new_y, result_new_y).all())
def test_monte_carlo_expectation(self):
intervention_samples = {
'X': np.array([1., 3., 5., 7., 9.]),
'Y': np.array([0., 1., 0., 1.]),
'Z': np.ones((5, 5))
}
expected_dict = {'X': 5.,
'Y': 0.5,
'Z': np.ones(5)}
result_dict = utilities.get_monte_carlo_expectation(intervention_samples)
self.assertEqual(expected_dict.keys(), result_dict.keys())
for var, mean in result_dict.items():
if isinstance(mean, np.ndarray):
self.assertTrue(np.equal(expected_dict[var], mean).all())
else:
self.assertEqual(expected_dict[var], mean)
if __name__ == '__main__':
unittest.main()
| ccbo-main | utils/utilities_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""General utilities to initialise dicts and object to store results."""
from __future__ import annotations
import copy
import itertools
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from emukit import core
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import sampling_utils
def powerset(iterable: List[str])-> itertools.chain[Tuple[str, ...]]:
"""Compute the power set of a list of values."""
# this returns e.g. powerset([1,2]) --> (1,) (2,) (1,2)
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(1,
len(s) + 1))
def create_n_dimensional_intervention_grid(limits: Any,
size_intervention_grid: int = 100
) -> np.ndarray:
"""Usage: combine_n_dimensional_intervention_grid([[-2,2],[-5,10]],10)."""
if not any(isinstance(el, list) for el in limits):
# We are just passing a single list
return np.linspace(limits[0], limits[1], size_intervention_grid)[:, None]
else:
extrema = np.vstack(limits)
inputs = [
np.linspace(i, j, size_intervention_grid)
for i, j in zip(extrema[:, 0], extrema[:, 1])
]
return np.dstack(np.meshgrid(*inputs)).ravel("F").reshape(len(inputs), -1).T
def assign_interventions(
variables: Tuple[str, ...], levels: Tuple[float, ...],
n_samples_per_intervention: int,
sampling_seed: int, d_i: Dict[Tuple[str, ...],
Any], graph: multidigraph.MultiDiGraph,
scm_funcs: Any) -> Dict[Tuple[str, ...], Dict[str, Any]]:
"""Assign initial intervention levels to d_i."""
intervention = make_intervention_dict(graph)
for var, level in zip(variables, levels):
intervention[var] = level
# Sample from the true interventional distribution
intervention_samples = sampling_utils.sample_scm(
scm_funcs=scm_funcs,
graph=None,
interventions=intervention,
n_samples=n_samples_per_intervention,
compute_moments=True,
moment=0,
seed=sampling_seed)
d_i[variables] = intervention_samples
return d_i
def get_interventional_grids(
exploration_set: List[Tuple[str, ...]],
intervention_limits: Dict[str, Sequence[float]],
size_intervention_grid: int = 100
) -> Dict[Tuple[str, ...], Optional[np.ndarray]]:
"""Build the n-dimensional interventional grids for the exploration sets."""
# Create grids
intervention_grid = {k: None for k in exploration_set}
for es in exploration_set:
if len(es) == 1:
intervention_grid[es] = create_n_dimensional_intervention_grid(
intervention_limits[es[0]], size_intervention_grid)
else:
if size_intervention_grid >= 100 and len(es) > 1:
# Reduce number of point to reduce computational cost of evaluating
# the acquisition function.
size_intervention_grid = 10
intervention_grid[es] = create_n_dimensional_intervention_grid(
[intervention_limits[j] for j in es], size_intervention_grid)
return intervention_grid
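# Example usage (illustrative sketch): grids for the exploration sets of
# SyntheticExample1, whose manipulative ranges are X in [-3, 2] and Z in [-1, 1].
#
#   grids = get_interventional_grids(
#       exploration_set=[("X",), ("Z",), ("X", "Z")],
#       intervention_limits={"X": [-3, 2], "Z": [-1, 1]},
#       size_intervention_grid=10)
#   # grids[("X",)] has shape (10, 1); grids[("X", "Z")] has shape (100, 2).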
def make_intervention_dict(graph: multidigraph.MultiDiGraph) -> Dict[str, Any]:
return {v: None for v in graph.nodes}
def initialise_interventional_objects(
exploration_sets: List[Tuple[str, ...]],
d_i: Dict[Tuple[str, ...], Dict[str, Any]],
target: str,
task: Any,
nr_interventions: Optional[int] = None,
) -> Tuple[List[Tuple[str, ...]], Optional[List[np.ndarray]],
List[Optional[np.ndarray]], Dict[Tuple[str, ...],
Optional[np.ndarray]],
Dict[Tuple[str, ...], Optional[np.ndarray]]]:
"""Initialize interventional dataset."""
assert isinstance(d_i, dict)
target_values = {es: None for es in exploration_sets}
interventions = copy.deepcopy(target_values)
intervention_data_x = copy.deepcopy(target_values)
intervention_data_y = copy.deepcopy(target_values)
for es in exploration_sets:
if es not in d_i:
pass
else:
# Interventional data contains a dictionary of dictionaries.
# Each corresponds to one type (es) of intervention.
interventional_samples = d_i[es] # es on keys and nd.array on values
assert isinstance(interventional_samples,
dict), (es, type(interventional_samples), d_i)
assert target in interventional_samples
assert isinstance(interventional_samples[target], np.ndarray)
if nr_interventions:
raise NotImplementedError("Not yet implemented")
else:
# Only have one interventional sample per intervention to start with
data_subset = interventional_samples
# Find the corresponding target values at these coordinates [array]
target_values[es] = np.array(data_subset[target]).reshape(-1, 1)
assert target_values[es] is not None
# Find the corresponding interventions [array]
if len(es) == 1:
interventions[es] = np.array(data_subset[es[0]]).reshape(-1, 1)
else:
tmp = []
for var in es:
tmp.append(data_subset[var])
interventions[es] = np.expand_dims(np.hstack(tmp), axis=0)
assert interventions[es] is not None
# Set the interventional data to use
intervention_data_x[es] = interventions[es]
intervention_data_y[es] = target_values[es]
assert intervention_data_x[es] is not None
assert intervention_data_y[es] is not None
# Get best intervention set at each time index
target_values = {k: v for k, v in target_values.items() if v is not None}
best_es = task(target_values, key=target_values.get)
# Interventions
best_intervention_level = interventions[best_es]
# Outcomes
best_target_value = target_values[best_es]
# Use the best outcome level at t=0 as a prior for all the other timesteps
best_es_sequence = [best_es]
best_intervention_levels = [best_intervention_level]
best_target_levels = [best_target_value]
return (
best_es_sequence,
best_target_levels,
best_intervention_levels,
intervention_data_x,
intervention_data_y,
)
def initialise_global_outcome_dict_new(
initial_optimal_target_values: Optional[List[np.ndarray]],
blank_val: float) -> List[float]:
"""Initialize dict of outcome values."""
assert isinstance(initial_optimal_target_values, list)
targets = []
if initial_optimal_target_values[0]:
targets.append(float(initial_optimal_target_values[0]))
else:
# No interventional data was provided initially so the list is empty.
targets.append(blank_val)
return targets
def initialise_optimal_intervention_level_list(
exploration_sets: List[Tuple[str, ...]],
initial_optimal_sequential_intervention_sets: Tuple[str, ...],
initial_optimal_sequential_intervention_levels: List[np.ndarray],
number_of_trials: int) -> Dict[Tuple[str, ...], Any]:
"""Initialize list of optimal intervention levels."""
intervention_levels = {
es: number_of_trials * [None] for es in exploration_sets
}
# Add interventional data that we have at start
for es in exploration_sets:
if es == initial_optimal_sequential_intervention_sets:
intervention_levels[es].insert(
0, initial_optimal_sequential_intervention_levels)
else:
intervention_levels[es].insert(0, None)
return intervention_levels
def make_parameter_space_for_intervention_set(
exploration_set: Tuple[str, ...],
lower_limit: Union[List[float], float],
upper_limit: Union[List[float], float],
) -> core.ParameterSpace:
"""Set ParameterSpace of intervention for one exploration_set."""
assert isinstance(exploration_set, tuple)
if len(exploration_set) == 1:
assert isinstance(lower_limit, float)
assert isinstance(upper_limit, float)
return core.ParameterSpace([
core.ContinuousParameter(
str(exploration_set), lower_limit, upper_limit)
])
else:
multivariate_limits = []
assert len(exploration_set) == len(lower_limit), exploration_set
assert len(exploration_set) == len(upper_limit), exploration_set
for i, var in enumerate(exploration_set):
multivariate_limits.append(
core.ContinuousParameter(str(var), lower_limit[i], upper_limit[i]))
return core.ParameterSpace(multivariate_limits)
def create_intervention_exploration_domain(
exploration_sets: List[Tuple[str, ...]],
interventional_variable_limits: Dict[str, Sequence[float]]
) -> Dict[Tuple[str, ...], Any]:
"""Get intervention domain for exploration_set."""
intervention_exploration_domain = {es: None for es in exploration_sets}
for es in exploration_sets:
if len(es) == 1:
assert es[0] in interventional_variable_limits.keys()
ll = float(min(interventional_variable_limits[es[0]]))
ul = float(max(interventional_variable_limits[es[0]]))
else:
ll, ul = [], [] # lower-limit and upper-limit
for var in es:
ll.append(float(min(interventional_variable_limits[var])))
ul.append(float(max(interventional_variable_limits[var])))
assert len(es) == len(ul) == len(ll)
# Assign
intervention_exploration_domain[
es] = make_parameter_space_for_intervention_set(es, ll, ul)
return intervention_exploration_domain
| ccbo-main | utils/initialisation_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.scm_utils."""
import unittest
import numpy as np
from ccbo.experiments import data
from ccbo.utils import scm_utils
def setup():
example = data.EXAMPLES_DICT["synthetic1"]()
scm = example.structural_causal_model(
variables=("X", "Z"), lambdas=(1., 2.))
graph = scm.graph
scm_funcs = scm.scm_funcs
d_o = {
"X":
np.array([[1.62435], [-1.07297], [1.74481], [-0.24937],
[-0.32241]]),
"Z":
np.array([[-0.41472], [3.78945], [-0.58653], [2.74533], [0.99641]]),
"Y":
np.array([[-0.63389], [-3.92631], [0.12215], [-3.85439], [0.72569]])
}
return graph, scm_funcs, d_o
class ScmUtilsTest(unittest.TestCase):
def test_build_fitted_scm(self):
graph, scm_funcs, d_o = setup()
fitted_scm_fncs = scm_utils.fit_scm_fncs(graph, d_o, scm_funcs, 1)
fitted_scm = scm_utils.build_fitted_scm(graph, fitted_scm_fncs, scm_funcs)
# Check that keys of dictionary are correct
self.assertEqual(list(fitted_scm().functions().keys()), list(graph.nodes))
# Check that the correct sampling functions are used by looking at the
# number of args taken by each function
for k, v in fitted_scm().functions().items():
if not list(graph.predecessors(k)):
# When variable is exogenous number of args = 1
self.assertEqual(v.__code__.co_argcount, 1)
else:
# When variable is endogenous number of args = 3
self.assertEqual(v.__code__.co_argcount, 3)
def test_fit_scm_fncs(self):
graph, scm_funcs, d_o = setup()
fitted_scm_fncs = scm_utils.fit_scm_fncs(graph, d_o, scm_funcs, 1)
# Check that keys of dictionary are correct
for fn_key, k in zip(
list(fitted_scm_fncs.keys()),
[(None, "X"), (("X",), "Z"), (("Z",), "Y")],
):
self.assertEqual(fn_key, k)
# Check that KernelDensity is used for exogenous variables (with number of
# input variables equal to None) and GPRegression is used for variables that
# have parents (whose number gives the number of input variables).
for k, v in fitted_scm_fncs.items():
if not list(graph.predecessors(k[1])):
# When variable is exogenous we use KernelDensity
self.assertIsNone(k[0])
self.assertEqual(type(v).__name__, "KernelDensity")
else:
# When variable is endogenous we use GPRegression
self.assertEqual(type(v).__name__, "GPRegression")
if __name__ == "__main__":
unittest.main()
| ccbo-main | utils/scm_utils_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SCM utilities."""
from __future__ import annotations
import collections
from typing import Any, Callable, Dict, Optional, Tuple
from networkx.classes import multidigraph
import numpy as np
from sklearn import neighbors # StatsModels works better
from ccbo.utils import gp_utils
from ccbo.utils import graph_functions
from ccbo.utils import sampling_utils
def _make_marginal(
scm_fncs: Dict[Tuple[Optional[Any], ...], Any]
) -> Callable[[Any], np.ndarray]:
"""Get a function that samples from the marginal distribution.
Args:
scm_fncs : fitted functions for the SCM.
Returns:
Function that returns a sample from a marginal distribution.
"""
# Get a sample for the exogenous node.
return lambda var: scm_fncs[var].sample()
def _make_conditional(
scm_fncs: Dict[Tuple[Optional[Any], ...], Any]
) -> Callable[[Any, Any, Any], np.ndarray]:
"""Get a function that samples from the conditional distribution.
Args:
scm_fncs : fitted functions for the SCM.
Returns:
Function that returns a sample from a conditional distribution corresponding
to the fitted SCM function for input_vars-output_var.
"""
  # This function constructs a function that returns a sample for an endogenous
  # node. As we only draw one sample we use np.squeeze(., axis=2) to drop the
  # third dimension returned by posterior_samples_f, which has size one.
def sample_endogenous(input_vars, output_var, sample):
# Get the values of the input variables in the sample
selected_sample = sampling_utils.select_sample(sample, input_vars)
# Get the estimated variance for the likelihood noise corresponding to the
# GP function mapping input_vars to output_var.
variance_likelihood_noise = scm_fncs[(input_vars,
output_var)].likelihood.variance[0]
    # We want to sample each endogenous node including its exogenous noise
    # variable, which is assumed to be normally distributed with zero mean and
    # variance given by variance_likelihood_noise. We thus sample from a
    # Gaussian random variable with the variance_likelihood_noise learned by
    # maximum likelihood when fitting the functions in the SCM.
sample_likelihood_noise = np.random.normal(
loc=np.zeros((1, 1)),
scale=np.ones((1, 1)) * np.sqrt(variance_likelihood_noise))
# Sample from the fitted function in the SCM and add the exogenous noise.
sample = np.squeeze(
scm_fncs[(input_vars, output_var)].posterior_samples_f(
selected_sample, full_cov=True, size=1),
axis=2) + sample_likelihood_noise
return sample
return sample_endogenous
def build_fitted_scm(
graph: multidigraph.MultiDiGraph,
fitted_scm_fncs: Dict[Tuple[Optional[Any], ...], Any],
scm_fncs: collections.OrderedDict[str, Any]) -> Any:
"""Create the fitted SCM using the estimated functions for the graph edges.
Args:
graph : causal graph.
fitted_scm_fncs : fitted functions for the SCM.
scm_fncs : true SCM.
Returns:
A fitted SCM class with functions to sample from it.
"""
class FittedSCM:
"""Fitted SCM class."""
def __init__(self):
self.graph = graph
self.fitted_scm_fncs = fitted_scm_fncs
self.scm_fncs = scm_fncs
def functions(self) -> collections.OrderedDict[str, Any]:
"""Store functions sampling from the fitted SCM functions."""
# SCM functions
f = collections.OrderedDict()
for v in list(self.scm_fncs.keys()):
if self.graph.in_degree[v] == 0:
# Exogenous node
f[v] = _make_marginal(self.fitted_scm_fncs)
else:
# Endogenous node
f[v] = _make_conditional(self.fitted_scm_fncs)
return f
return FittedSCM
def fit_scm_fncs(
graph: multidigraph.MultiDiGraph,
data: Dict[str, Any],
scm: collections.OrderedDict[str, Any],
n_restart: int = 10,
kernel_density: str = "gaussian") -> Dict[Tuple[Optional[Any], ...], Any]:
"""Fit functions in the SCM.
Args:
graph : causal graph.
data : observations from the true SCM.
scm : true SCM.
n_restart: n_restart for hyperparameters optimization.
kernel_density: Type of kernel to fit to estimate the distributions of the
exogenous variables. Options are "gaussian", "tophat", "epanechnikov",
"exponential", "linear" and "cosine". Default is "gaussian".
Returns:
Dictionary containing the estimated SCM functions. For the endogenous nodes
    in the graph (nodes determined by other variables in the graph), the key in
    the dictionary is a tuple giving (parents_of_node, node) while each value is
    the estimated function (currently this is a GP) for the SCM.
    For the exogenous nodes in the DAG (nodes without parents), the key in the
    dictionary is (None, node) while the associated value is given by a kernel
density estimator (KDE).
"""
# Get all nodes in the graph
nodes = list(scm.keys())
# Get parents for each node
parents = graph_functions.get_all_parents(graph)
# Initialise SCM functions
fncs = {}
# Assign estimators to each node in the graph
for v in nodes:
if not parents[v]:
# Exogenous node, add function to the dict with key = (None, v)
xx = data[v]
fncs[(None, v)] = neighbors.KernelDensity(kernel=kernel_density).fit(xx)
else:
# Endogenous nodes, add function to the dict with
# key = (tuple(parents[v]), v)
data_parents = []
for j in parents[v]:
data_parents.append(data[j])
xx = np.hstack(data_parents)
yy = data[v]
fncs[(tuple(parents[v]), v)] = gp_utils.fit_gp(
x=xx, y=yy, n_restart=n_restart)
return fncs
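# --- Illustrative usage sketch (not part of the original module) ---
def _example_fit_and_sample(graph, d_o, scm_funcs):
  """Hedged sketch of the fit -> build -> sample flow; the arguments are
  assumed to come from one of the examples in ccbo.experiments (see
  scm_utils_test.py for a concrete construction)."""
  fitted_fncs = fit_scm_fncs(graph, d_o, scm_funcs, n_restart=1)
  fitted_scm = build_fitted_scm(graph, fitted_fncs, scm_funcs)
  # The returned class exposes one sampling function per node, keyed by name.
  return fitted_scm().functions()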
| ccbo-main | utils/scm_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""General utilities."""
import enum
import math
from typing import Any, Dict, Optional, Tuple
import numpy as np
from scipy import stats
class Trial(enum.Enum):
"""Type of trial, i.e. interventional or observational."""
INTERVENTION = 0
OBSERVATION = 1
class Task(enum.Enum):
"""Task can be either minimization or maximation."""
MIN = "min"
MAX = "max"
class Direction(enum.Enum):
"""The direction of the constraint can be either < or >."""
LOWER = "<"
HIGHER = ">"
class VariableType(enum.Enum):
"""The types of variables included in the SCM.
  These can be of the following types:
  - Target variable = "t"
  - Manipulative variable = "m"
  - Non-manipulative variable = "nm"
  - Unobserved confounder = "u"
  - Protected variable = "p"
"""
TARGET = "t"
MANIPULATIVE = "m"
NONMANIPULATIVE = "nm"
UNOBSERVED = "u"
EVAL_FN = {Task.MIN: min, Task.MAX: max}
ARG_EVAL_FN = {Task.MIN: np.argmin, Task.MAX: np.argmax}
A_EVAL_FN = {Task.MIN: np.amin, Task.MAX: np.amax}
def sigmoid(x: float) -> float:
return 1 / (1 + math.exp(-x))
def get_stored_values(
target: str, target_variable: str,
mean_dict_store: Dict[Tuple[str, ...], Dict[str, Any]],
mean_constraints_dict_store: Dict[Tuple[str, ...],
Dict[str, Dict[str, Any]]]
) -> Dict[Tuple[str, ...], Dict[str, Any]]:
if target == target_variable:
dict_store = mean_dict_store
else:
dict_store = mean_constraints_dict_store
return dict_store
def get_standard_normal_pdf_cdf(
x: float, mean: np.ndarray, standard_deviation: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns pdf and cdf of standard normal evaluated at (x - mean)/sigma."""
u = (x - mean) / standard_deviation
pdf = stats.norm.pdf(u)
cdf = stats.norm.cdf(u)
return u, pdf, cdf
def standard_mean_function(x: np.ndarray) -> np.ndarray:
"""Function to get zero mean for the causal kernel."""
return np.zeros_like(x)
def zero_variance_adjustment(x: np.ndarray) -> np.ndarray:
"""Function to get zero adjustment for the variance of the causal kernel."""
return np.zeros_like(x)
def make_column_shape_2d(x: Any) -> Any:
"""Reshapes an array to create a 2-d column."""
return np.array([x]).reshape(-1, 1)
def check_reshape_add_data(
interventional_data_x: Dict[Tuple[str, ...], Optional[Any]],
interventional_data_y: Dict[Tuple[str, ...], Optional[Any]],
new_interventional_data_x: Any,
y_new: float, best_es: Tuple[str, ...],
) -> Tuple[Optional[Any], Optional[Any]]:
"""Checks whether interventional data needs reshaping and adds values."""
if (interventional_data_x[best_es] is not None and
interventional_data_y[best_es] is not None):
if len(new_interventional_data_x.shape) == 1:
new_interventional_data_x = make_column_shape_2d(
new_interventional_data_x)
assert interventional_data_x[best_es].shape[
1] == new_interventional_data_x.shape[1]
# Update interventional data X
interventional_data_x[best_es] = np.vstack(
(interventional_data_x[best_es], new_interventional_data_x)
)
# Update interventional data Y
interventional_data_y[best_es] = np.vstack(
(interventional_data_y[best_es], make_column_shape_2d(y_new),)
)
else:
# Assign new interventional data
if len(new_interventional_data_x.shape) == 1 and len(best_es) == 1:
reshaped_new_interventional_data_x = make_column_shape_2d(
new_interventional_data_x)
elif len(best_es) > 1 and len(new_interventional_data_x.shape) == 1:
reshaped_new_interventional_data_x = new_interventional_data_x.reshape(
1, -1)
else:
reshaped_new_interventional_data_x = new_interventional_data_x
# Assign X and Y
interventional_data_x[best_es] = reshaped_new_interventional_data_x
interventional_data_y[best_es] = make_column_shape_2d(y_new)
assert (
np.shape(interventional_data_x[best_es])[0]
== np.shape(interventional_data_y[best_es])[0]
)
return (
interventional_data_x[best_es],
interventional_data_y[best_es],
)
def get_monte_carlo_expectation(
intervention_samples: Dict[str, np.ndarray]) -> Dict[str, Any]:
"""Returns the expected value of the intervention via MC sampling."""
expectation = {k: None for k in intervention_samples.keys()}
for es in expectation.keys():
expectation[es] = intervention_samples[es].mean(axis=0)
return expectation
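# --- Illustrative usage sketch (not part of the original module) ---
def _example_monte_carlo_expectation() -> Dict[str, Any]:
  """Hedged example; the sample values below are placeholders, in the library
  they come from sampling the SCM under a given intervention."""
  intervention_samples = {"Y": np.array([[1.0], [3.0]])}
  # Returns {"Y": array([2.0])}, i.e. the sample mean over the first axis.
  return get_monte_carlo_expectation(intervention_samples)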
| ccbo-main | utils/utilities.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to define the cost structure."""
import collections
from typing import Tuple, Callable, List, Any, OrderedDict
from emukit.core import acquisition as acq
import numpy as np
class Cost(acq.Acquisition):
"""Class for computing the cost of each intervention."""
def __init__(self, costs_functions: OrderedDict[str, Callable[[Any], Any]],
exploration_set: Tuple[str, ...], target: str):
self.costs_functions = costs_functions
self.exploration_set = exploration_set
self.target = target
def evaluate(self, x: Any) -> np.ndarray[Any, Any]:
if len(self.exploration_set) == 1:
# Univariate intervention
return self.costs_functions[self.exploration_set[0]](x)
else:
# Multivariate intervention
cost = []
for i, es_member in enumerate(self.exploration_set):
cost.append(self.costs_functions[es_member](x[:, i]))
return sum(cost)
@property
  def has_gradients(self) -> bool:
return True
def define_costs(manipulative_variables: List[str],
type_cost: int,
fix_cost: float = 1.0
) -> OrderedDict[str, Callable[[Any], Any]]:
"""Initialize dict with functions to compute cost."""
if type_cost == 1:
fix_cost_function = lambda x: fix_cost
costs = collections.OrderedDict([
(var, fix_cost_function) for var in manipulative_variables
])
else:
raise NotImplementedError("Not yet implemented")
return costs
def total_intervention_cost(es: Tuple[str, ...],
costs: OrderedDict[str, Callable[[Any], Any]],
x: np.ndarray) -> float:
total_cost = 0.0
for i, es_member in enumerate(es):
total_cost += costs[es_member](x[:, i])
return total_cost
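# --- Illustrative usage sketch (not part of the original module) ---
def _example_fixed_costs() -> float:
  """Hedged example with hypothetical manipulative variables "X" and "Z":
  with type_cost=1 every variable has the same fixed cost, so intervening on
  both variables at a single 1x2 input costs 2 * fix_cost = 2.0."""
  costs = define_costs(["X", "Z"], type_cost=1, fix_cost=1.0)
  return total_intervention_cost(("X", "Z"), costs, np.array([[0.5, -0.5]]))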
| ccbo-main | utils/cost_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Intervention function utilities."""
from __future__ import annotations
import collections
import copy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from absl import logging
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import initialisation_utils
from ccbo.utils import sampling_utils
from ccbo.utils import utilities
def assign_initial_intervention_level(
exploration_set: Tuple[str, ...],
intervention_level: np.ndarray,
variables: Sequence[str]
) -> Dict[str, Any]:
"""Intervention assignment."""
intervention_blanket = {key: None for key in variables}
if len(exploration_set) == 1:
# Assign the intervention
intervention_blanket[exploration_set[0]] = float(intervention_level)
else:
# Intervention happening on _multiple_ variables
for variable, lvl in zip(exploration_set, np.transpose(intervention_level)):
# Assign the intervention
intervention_blanket[variable] = float(lvl)
return intervention_blanket
def evaluate_target_function(
scm_funcs: collections.OrderedDict[str, Any],
exploration_set: Tuple[str, ...],
all_vars: Sequence[str],
noisy_observations: bool = False,
seed: Optional[int] = None,
n_samples_for_interventions: int = 1
) -> Callable[[str, np.ndarray], float]:
"""Evaluates the target function."""
def compute_target_function(target: str,
intervention_levels: np.ndarray) -> float:
# Assign interventions
interventions = assign_initial_intervention_level(
exploration_set=exploration_set,
intervention_level=intervention_levels,
variables=all_vars
)
# Set the noise in the SCM to be equal or different from zero
if not noisy_observations:
      # We sample from the SCM with zero noise, therefore we have no randomness
      # and don't need to average over samples
n_samples = 1
else:
assert n_samples_for_interventions > 1, ("Noisy evaluations require a set"
" of samples to compute the "
"average causal effects.")
n_samples = n_samples_for_interventions
if seed is not None:
np.random.seed(seed)
# Sample from the true interventional distribution
interventional_samples = sampling_utils.sample_scm(
scm_funcs=scm_funcs,
graph=None,
interventions=interventions,
n_samples=n_samples,
compute_moments=True,
moment=0,
seed=seed)
    # Compute the average effect of the intervention(s), i.e. the target function
target_response = float(interventional_samples[target])
return target_response
return compute_target_function
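# --- Illustrative usage sketch (not part of the original module) ---
def _example_target_function_evaluation(scm_funcs):
  """Hedged example: evaluate E[Y | do(X = 0.5)] for a hypothetical SCM whose
  variables are ("X", "Z", "Y"); `scm_funcs` is assumed to be an OrderedDict
  such as the one defined in ccbo.scm_examples.scm."""
  target_fn = evaluate_target_function(
      scm_funcs=scm_funcs,
      exploration_set=("X",),
      all_vars=["X", "Z", "Y"],
      noisy_observations=False)
  # With zero noise a single SCM sample is drawn, so the value is deterministic.
  return target_fn("Y", np.array([0.5]))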
def get_optimal_interventions(
exploration_sets: List[Tuple[str, ...]],
interventional_grids: Dict[Any, Any],
scm_funcs: collections.OrderedDict[str, Any],
graph: multidigraph.MultiDiGraph,
model_variables: List[str],
target_variable: str,
task: utilities.Task = utilities.Task.MIN,
n_samples: int = 1,
sampling_seed: Optional[int] = None
) -> Tuple[Any, ...]:
"""Gets the optimal interventions across exploration sets."""
logging.warning("Set high number of n_samples to ensure good estimation of "
"the ground truth but remember that the higher the number "
"of samples the slower the computations.")
assert target_variable in model_variables
optimal_interventions = {setx: None for setx in exploration_sets}
y_stars = copy.deepcopy(optimal_interventions)
interventions = initialisation_utils.make_intervention_dict(graph)
ce = {es: [] for es in exploration_sets}
# E[Y | do( . )]
for s in exploration_sets:
# Reset intervention to avoid carrying over levels from
# previous exploration set
intervention_on_s = copy.deepcopy(interventions)
for level in interventional_grids[s]:
# Univariate intervention
if len(s) == 1:
intervention_on_s[s[0]] = float(level)
# Multivariate intervention
else:
for var, val in zip(s, level):
intervention_on_s[var] = val
# Sample from the true interventional distribution
out = sampling_utils.sample_scm(
scm_funcs=scm_funcs,
graph=None,
interventions=intervention_on_s,
n_samples=n_samples,
compute_moments=True,
moment=0,
seed=sampling_seed)
ce[s].append(out[target_variable])
local_target_values = []
for s in exploration_sets:
if task.value == utilities.Task.MIN.value:
idx = np.array(ce[s]).argmin()
else:
idx = np.array(ce[s]).argmax()
local_target_values.append((s, idx, ce[s][idx]))
y_stars[s] = ce[s][idx]
optimal_interventions[s] = interventional_grids[s][idx]
# Find best intervention
best_s, best_idx, best_objective_value = min(local_target_values,
key=lambda t: t[2])
best_s_value = interventional_grids[best_s][best_idx]
return (
best_s_value,
best_s,
best_objective_value,
y_stars,
optimal_interventions,
ce,
)
| ccbo-main | utils/intervention_functions.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the multi-task causal constrained acquisition functions."""
from typing import Any, Callable, Dict, List, Optional, Tuple
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.acquisitions import multitask_constrained_expected_improvement as multitask_con_ei
from ccbo.utils import utilities
class NoisyMultiTaskConstrainedCausalExpectedImprovement(
multitask_con_ei.MultiTaskConstrainedCausalExpectedImprovement):
"""Implementation of the causal constrained EI acquisition function."""
def __init__(self, task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: Optional[gpy_model_wrappers.GPyModelWrapper],
model_constraints: Optional[gpy_model_wrappers.GPyModelWrapper],
constraints: Dict[str, List[Any]],
constraints_dict: Dict[Tuple[str, ...], Dict[str, Any]],
mean_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
variance_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
exploration_set: Tuple[str, ...]) -> None:
base_args = {
"current_global_opt": None,
"task": task,
"mean_function": mean_function,
"variance_function": variance_function,
"model": model,
"model_constraints": model_constraints,
"constraints": constraints,
"constraints_dict": constraints_dict,
"mean_function_constraints": mean_function_constraints,
"variance_function_constraints": variance_function_constraints,
"exploration_set": exploration_set
}
super().__init__(**base_args)
def get_global_opt(self, all_sample_f: np.ndarray, is_feasible: np.ndarray
) -> List[float]:
"""Get one value of feasible global optimum by sampling."""
best_feasible_points = []
for one_sample_f, one_is_feasible in zip(all_sample_f, is_feasible):
      if np.any(one_is_feasible):
best_feasible_point = utilities.EVAL_FN[self.task](
one_sample_f[one_is_feasible])
else:
best_feasible_point = utilities.EVAL_FN[self.task](one_sample_f)
best_feasible_points.append(best_feasible_point)
return best_feasible_points
def get_improvement(self,
x: np.ndarray,
montecarlo_estimate: bool = True,
n_samples: int = 100,
n_samples_min: int = 10) -> np.ndarray:
"""Evaluate the Constrained Expected Improvement."""
if montecarlo_estimate:
# Sample from the target function
is_feasible = self.get_feasibility(x, n_samples_min + n_samples)
all_sample_f = self.get_samples_target_function(x,
n_samples_min + n_samples)
# Get the optimal feasible value for each sample
current_feasible_global_opt = self.get_global_opt(
all_sample_f[:n_samples_min, :], is_feasible[:n_samples_min, :])
sample_f = all_sample_f[n_samples_min:, :]
sample_g = is_feasible[n_samples_min:, :]
out = []
for i in range(n_samples_min):
diff = np.ones_like(sample_f)*current_feasible_global_opt[i] - sample_f
ei = np.vstack([
np.max((np.zeros(diff[i].shape[0]), diff[i]), axis=0)
for i in range(n_samples)
])
out.append(np.mean(sample_g * ei, axis=0))
improvement = np.mean(np.vstack(out), axis=0)
else:
raise NotImplementedError(
"Other ways of computing this functions are not implemented")
return improvement
| ccbo-main | acquisitions/noisy_multitask_constrained_expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the multi-task causal constrained acquisition functions."""
from typing import Any, Callable, Dict, List, Optional, Tuple
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.acquisitions import expected_improvement
from ccbo.utils import constraints_functions
from ccbo.utils import utilities
class MultiTaskConstrainedCausalExpectedImprovement(
expected_improvement.CausalExpectedImprovement):
"""Implementation of the causal constrained EI acquisition function.
This function computes the constrained expected improvement with respect to
the joint distribution p(f, G) where f is the target function and G is the
  set of constraint functions. When the direction is > for all constraints this
  function evaluates E_{p(f,G)}[max(0, y* - f) * 1{G > lambda}] where lambda is
the vector of threshold values and y* is the currently best feasible value
observed. As f and G are jointly modelled via a multi-task
GP model (ICM) their joint distribution does not factorise and the resulting
constrained expected improvement cannot be computed as the product of expected
improvement and probability of feasibility. We approximate this expectation
via Monte Carlo integration.
"""
def __init__(self, current_global_opt: Optional[float], task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: Optional[gpy_model_wrappers.GPyModelWrapper],
model_constraints: Optional[gpy_model_wrappers.GPyModelWrapper],
constraints: Dict[str, List[Any]],
constraints_dict: Dict[Tuple[str, ...], Dict[str, Any]],
mean_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
variance_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
exploration_set: Tuple[str, ...]) -> None:
base_args = {
"current_global_opt": current_global_opt,
"task": task,
"mean_function": mean_function,
"variance_function": variance_function,
"model": model
}
super().__init__(**base_args)
self.model_constraints = model_constraints
self.constraints = constraints
self.constraints_dict = constraints_dict
self.mean_function_constraints = mean_function_constraints
self.variance_function_constraints = variance_function_constraints
self.exploration_set = exploration_set
def get_improvement(self,
x: np.ndarray,
montecarlo_estimate: bool = True,
n_samples: int = 1000) -> np.ndarray:
"""Evaluate the Constrained Expected Improvement.
When using a multi-task model the target function and the constraints
are correlated thus the acquisition cannot be factorized in an
improvement * probability of feasibility. We thus compute it by sampling
from the joint distribution of the function and the constraints.
Args:
x: the values at which to evaluate the acquisition.
montecarlo_estimate: whether to use a Monte Carlo estimate or not.
n_samples: number of samples to use to get a Monte Carlo estimate.
Returns:
The constrained expected improvement estimated at
x via Monte Carlo integration using n_samples.
"""
if montecarlo_estimate:
# Sample from the target function
is_feasible = self.get_feasibility(x, n_samples)
sample_f = self.get_samples_target_function(x, n_samples)
diff = self.current_global_opt - sample_f
ei = [
np.max((np.repeat(0, diff[i].shape[0]), diff[i]), axis=0)
for i in range(n_samples)
]
improvement = np.mean(is_feasible * ei, axis=0)
else:
raise NotImplementedError(
"Other ways of computing this functions are not implemented")
return improvement
def get_samples_target_function(self, x: np.ndarray,
n_samples: int) -> np.ndarray:
# Sample from the target function
x_f_augmented = np.concatenate(
[x, np.zeros_like(x)], axis=1)
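    # The appended zeros encode the task index of the objective in the
    # multi-task (ICM) model; constraint tasks are indexed 1, 2, ... in
    # get_feasibility below.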
sample_f = self._sample_from_model(x_f_augmented, self.model, n_samples)
return sample_f
def _sample_from_model(self, x: np.ndarray,
model: gpy_model_wrappers.GPyModelWrapper,
n_samples: int, seed: Optional[int] = 0) -> np.ndarray:
# Sample from GP model
# We avoid changing the seed of the algorithm by storing it, sampling the
# functions and then resetting the old seed
if seed:
old_seed = np.random.get_state()
np.random.seed(seed)
mean, _ = model.predict(x)
cov = model.predict_covariance(x)
sample = np.random.multivariate_normal(mean[:, 0], cov, n_samples)
if seed:
np.random.set_state(old_seed)
return sample
def get_feasibility(self, x: np.ndarray, n_samples: int) -> np.ndarray:
is_feasible = np.ones((n_samples, x.shape[0])).astype(bool)
# Sample from the constraint functions - if there are no constraints
# is_feasible is not changed and all x values are feasible.
if self.model_constraints:
for i, model_c in enumerate(
list(self.model_constraints.values())):
x_augmented = np.concatenate(
[x, np.ones_like(x)*(i+1)], axis=1)
sample_g = self._sample_from_model(x_augmented, model_c, n_samples)
# Get the direction and threshold for the current variable
c_var = self.constraints_dict[self.exploration_set]["vars"][i]
direction = self.constraints[c_var][0]
threshold = self.constraints[c_var][1]
# Check feasibility of all samples for the current variable and merge
# it with the feasibility of the previous one(s)
is_feasible = (
is_feasible & constraints_functions.EVAL_CONSTRAINT_OP[direction](
sample_g, threshold))
return is_feasible
def probability_feasibility(self, x: np.ndarray) -> Optional[None]:
raise NotImplementedError(
"Computation of the probability of feasibility is not implemented.")
def evaluate_to_store(self, x: np.ndarray)-> Any:
"""Evaluate the improvement and probability of feasibility separately."""
return self.get_improvement(x), np.zeros((x.shape[0], 1))
| ccbo-main | acquisitions/multitask_constrained_expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""acquisitions init."""
| ccbo-main | acquisitions/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the causal constrained acquisition functions."""
from typing import Any, Callable, Dict, List, Optional, Tuple
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.acquisitions import constrained_expected_improvement
from ccbo.acquisitions import noisy_expected_improvement
from ccbo.utils import constraints_functions
from ccbo.utils import utilities
class NoisyConstrainedCausalExpectedImprovement(
noisy_expected_improvement.NoisyCausalExpectedImprovement,
constrained_expected_improvement.ConstrainedCausalExpectedImprovement):
"""Implementation of the noisy constrained causal EI acquisition function."""
def __init__(self, task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: Optional[gpy_model_wrappers.GPyModelWrapper],
model_constraints: Optional[gpy_model_wrappers.GPyModelWrapper],
constraints: Dict[str, List[Any]],
constraints_dict: Dict[Tuple[str, ...], Dict[str, Any]],
mean_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
variance_function_constraints: Optional[Callable[[np.ndarray],
np.ndarray]],
exploration_set: Tuple[str, ...],
n_samples: int = 10) -> None:
noisy_expected_improvement.NoisyCausalExpectedImprovement.__init__(
self,
task=task,
mean_function=mean_function,
variance_function=variance_function,
model=model,
n_samples=n_samples)
constrained_expected_improvement.ConstrainedCausalExpectedImprovement.__init__(
self,
current_global_opt=None,
task=task,
mean_function=mean_function,
variance_function=variance_function,
model=model,
model_constraints=model_constraints,
constraints=constraints,
constraints_dict=constraints_dict,
mean_function_constraints=mean_function_constraints,
variance_function_constraints=variance_function_constraints,
exploration_set=exploration_set)
def get_best_feasible_point(
self, x: np.ndarray,
sample_target_fnc: np.ndarray) -> float:
"""Select feasible point in sample_target_fnc by sampling from the constraints."""
    # If there are constraints we modify is_feasible; otherwise the feasibility
# is one for every input value and the best feasible point is the optimal
# value in sample_target_fnc
is_feasible = np.ones_like(x, dtype=bool)
best_feasible_point = utilities.EVAL_FN[self.task](sample_target_fnc)
# if any(self.mean_function_constraints):
if self.model_constraints:
for p in self.model_constraints.keys():
direction, value = self.constraints[p][0], self.constraints[p][1]
if self.model_constraints[p]:
mean, variance = self.model_constraints[p].predict(x)
if len(variance.shape) == 3:
# The predict function returns a 3-d tensor that we want to reduce
            # to the same shape as the inputs
variance = np.squeeze(variance, axis=2)
elif len(variance.shape) > 3:
raise ValueError("Prediction returns a high dimensional tensor!")
else:
mean = self.mean_function_constraints[p](x)
variance = self.variance_function_constraints[p](x).clip(0)
sample = np.random.normal(mean + self.jitter, np.sqrt(variance))
is_feasible = (is_feasible) & (
constraints_functions.EVAL_CONSTRAINT_OP[direction](sample, value))
if np.any(is_feasible):
# There is at least one feasible value. We get the optimal value among
# the feasible points.
best_feasible_point = utilities.EVAL_FN[self.task](
sample_target_fnc[is_feasible])
return best_feasible_point
def get_global_opt(self, mean: np.ndarray, standard_deviation: np.ndarray,
task: Any, x: np.ndarray) -> float:
"""Get one value of feasible global optimum by sampling."""
sample_target_fnc = np.random.normal(mean, standard_deviation)
best_feasible_point = self.get_best_feasible_point(x, sample_target_fnc)
return best_feasible_point
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""Evaluate the Constrained Expected Improvement."""
# Compute get_improvement as in NoisyCausalExpectedImprovement
# and get_probability_feasibility as in ConstrainedCausalExpectedImprovement
return self.get_improvement(x) * self.get_probability_feasibility(x)[:, 0]
def evaluate_to_store(self, x: np.ndarray)-> Tuple[np.ndarray, np.ndarray]:
"""Evaluate the improvement and probability of feasibility separately."""
return self.get_improvement(x), self.get_probability_feasibility(x)[:, 0]
| ccbo-main | acquisitions/noisy_constrained_expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the causal constrained acquisition functions."""
from typing import Any, Callable, Dict, Tuple, Optional, List
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
import scipy.stats
from ccbo.acquisitions import expected_improvement
from ccbo.utils import utilities
class ConstrainedCausalExpectedImprovement(
expected_improvement.CausalExpectedImprovement
):
"""Implementation of the causal constrained EI acquisition function."""
def __init__(
self,
current_global_opt: Optional[float],
task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: Optional[gpy_model_wrappers.GPyModelWrapper],
model_constraints: Optional[gpy_model_wrappers.GPyModelWrapper],
constraints: Dict[str, List[Any]],
constraints_dict: Dict[Tuple[str, ...], Dict[str, Any]],
mean_function_constraints: Optional[
Dict[str, Callable[[np.ndarray], np.ndarray]]
],
variance_function_constraints: Optional[
Dict[str, Callable[[np.ndarray], np.ndarray]]
],
exploration_set: Tuple[str, ...],
) -> None:
base_args = {
"current_global_opt": current_global_opt,
"task": task,
"mean_function": mean_function,
"variance_function": variance_function,
"model": model,
}
super().__init__(**base_args)
self.model_constraints = model_constraints
self.constraints = constraints
self.constraints_dict = constraints_dict
self.mean_function_constraints = mean_function_constraints
self.variance_function_constraints = variance_function_constraints
self.exploration_set = exploration_set
def get_probability_feasibility(self, x: np.ndarray) -> Any:
"""Compute the probability of feasibility."""
probability_feasibility = np.ones((x.shape[0], 1))
# Check if constraints exist for the given exploration set
# self.mean_function_constraints is an empty dict if the exploration
# set does not have constraints. With any we return False in this case.
# if any(self.mean_function_constraints):
if self.model_constraints:
for p in self.model_constraints.keys():
direction, value = self.constraints[p][0], self.constraints[p][1]
if self.model_constraints[p]:
mean, variance = self.model_constraints[p].predict(x)
if len(variance.shape) == 3:
# The predict function returns a 3-d tensor that we want to reduce
            # to the same shape as the inputs
variance = np.squeeze(variance, axis=2)
elif len(variance.shape) > 3:
raise ValueError("Prediction returns a high dimensional tensor!")
else:
assert self.mean_function_constraints
assert self.variance_function_constraints
mean = self.mean_function_constraints[p](x)
variance = self.variance_function_constraints[p](x).clip(0)
standardized_value = (value - mean) / np.sqrt(variance)
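        # Probability of feasibility under the Gaussian posterior:
        # P(c(x) < value) = Phi((value - mean) / std); for ">" constraints the
        # complement 1 - Phi(.) is used instead.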
if direction == utilities.Direction.LOWER:
probability_feasibility *= scipy.stats.norm.cdf(standardized_value)
else:
probability_feasibility *= 1 - scipy.stats.norm.cdf(
standardized_value)
return probability_feasibility
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""Evaluate the Constrained Expected Improvement."""
return self.get_improvement(x) * self.get_probability_feasibility(x)
def evaluate_to_store(self, x: np.ndarray)-> Any:
"""Evaluate the improvement and probability of feasibility separately."""
return self.get_improvement(x), self.get_probability_feasibility(x)
| ccbo-main | acquisitions/constrained_expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the causal acquisition functions."""
import functools
from typing import Any, Callable, Dict, List, Optional, OrderedDict, Tuple
from emukit.core import acquisition as acq
from emukit.core import parameter_space
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.acquisitions import constrained_expected_improvement as con_ei
from ccbo.acquisitions import expected_improvement
from ccbo.acquisitions import multitask_constrained_expected_improvement as multitask_con_ei
from ccbo.acquisitions import noisy_constrained_expected_improvement as noisy_con_ei
from ccbo.acquisitions import noisy_expected_improvement as noisy_ei
from ccbo.acquisitions import noisy_multitask_constrained_expected_improvement as noisy_multitask_ei
from ccbo.utils import cost_utils
from ccbo.utils import initialisation_utils as init_utils
from ccbo.utils import plotting_utils
from ccbo.utils import utilities
def numerical_optimization(
acquisition: acq.Acquisition,
inputs: np.ndarray,
exploration_set: Tuple[str, ...],
) -> Tuple[Any, Any]:
"""Numerically optimize a function evaluating it on the inputs."""
# Finds the new best point by evaluating the function in a set of inputs
_, d = inputs.shape
improvements = acquisition.evaluate(inputs)
# Notice that here we always want to maximize the acquisition function as we
# have multiplied the improvement with a minus sign when solving a
# maximization problem.
idx = np.argmax(improvements)
  # Get the point with the best improvement; x_new should be taken from the inputs
x_new = inputs[idx]
y_new = np.max(improvements)
# Reshape point
if len(x_new.shape) == 1 and len(exploration_set) == 1:
x_new = utilities.make_column_shape_2d(x_new)
elif len(exploration_set) > 1 and len(x_new.shape) == 1:
x_new = x_new.reshape(1, -1)
else:
raise ValueError("The new point is not an array.")
if x_new.shape[0] == d:
# The function make_column_shape_2d might convert a (d, ) array
    # into a (d, 1) array that needs to be reshaped
x_new = np.transpose(x_new)
assert x_new.shape[1] == inputs.shape[1], "New point has a wrong dimension"
return y_new, x_new
def optimize_acquisition(
acquisition: acq.Acquisition,
intervention_domain: parameter_space.ParameterSpace,
exploration_set: Tuple[str, ...],
cost_functions: OrderedDict[str, Callable[[Any], Any]],
target: str,
num_anchor_points: int = 100,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
)-> Tuple[np.ndarray, np.ndarray]:
"""Optimize the acquisition function rescaled by the cost."""
assert isinstance(intervention_domain, parameter_space.ParameterSpace)
dim = intervention_domain.dimensionality
assert dim == len(exploration_set)
cost_of_acquisition = cost_utils.Cost(cost_functions, exploration_set, target)
acquisition_over_cost = (acquisition / cost_of_acquisition)
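  # Emukit acquisitions support arithmetic composition, so dividing by the cost
  # acquisition yields a new acquisition that scores improvement per unit
  # intervention cost.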
if dim > 1:
num_anchor_points = int(np.sqrt(num_anchor_points))
if sample_anchor_points:
# Ensure the points are different every time we call the function
if seed_anchor_points is not None:
np.random.seed(seed_anchor_points)
else:
np.random.seed()
sampled_points = intervention_domain.sample_uniform(
point_count=num_anchor_points)
else:
limits = [list(tup) for tup in intervention_domain.get_bounds()]
sampled_points = init_utils.create_n_dimensional_intervention_grid(
limits, num_anchor_points)
y_new, x_new = numerical_optimization(acquisition_over_cost, sampled_points,
exploration_set)
return y_new, x_new
def evaluate_acquisition_function(
intervention_domain: parameter_space.ParameterSpace,
bo_model: Optional[gpy_model_wrappers.GPyModelWrapper],
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
current_global_opt: float,
exploration_set: Tuple[str, ...],
cost_functions: OrderedDict[str, Callable[[Any], Any]],
task: utilities.Task,
target: str,
noisy_acquisition: bool = False,
num_anchor_points: int = 100,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
verbose: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Define and optimize the acquisition function for a unconstrained problem."""
if noisy_acquisition:
    # When working with noisy observations, different plausible values of
    # current_global_opt are obtained by repeatedly sampling from the bo_model
expected_improvement_cls = noisy_ei.NoisyCausalExpectedImprovement
else:
expected_improvement_cls = functools.partial(
expected_improvement.CausalExpectedImprovement, current_global_opt)
acquisition = expected_improvement_cls(task, mean_function, variance_function,
bo_model)
y_acquisition, x_new = optimize_acquisition(
acquisition, intervention_domain, exploration_set, cost_functions,
target, num_anchor_points, sample_anchor_points, seed_anchor_points)
if verbose:
# Plot acquisition function. We avoid changing the global seed by
    # storing it and restoring it after the evaluation
old_seed = np.random.get_state()
np.random.seed(0)
limits = [list(tup) for tup in intervention_domain.get_bounds()]
sampled_points = init_utils.create_n_dimensional_intervention_grid(
limits, num_anchor_points)
improvement = acquisition.evaluate(sampled_points)
np.random.set_state(old_seed)
plotting_utils.plot_acquisition(sampled_points, improvement, x_new)
return y_acquisition, x_new
def evaluate_constrained_acquisition_function(
intervention_domain: parameter_space.ParameterSpace,
bo_model: Optional[gpy_model_wrappers.GPyModelWrapper],
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
current_global_opt: float,
exploration_set: Tuple[str, ...],
cost_functions: OrderedDict[str, Callable[[Any], Any]],
task: utilities.Task,
target: str,
bo_model_constraints: Optional[gpy_model_wrappers.GPyModelWrapper],
mean_function_constraints: Callable[[np.ndarray], np.ndarray],
variance_function_constraints: Callable[[np.ndarray], np.ndarray],
constraints: Dict[str, List[Any]], constraints_dict: Dict[Tuple[str, ...],
Dict[str, Any]],
verbose: bool = False,
noisy_acquisition: bool = False,
num_anchor_points: int = 100,
sample_anchor_points: bool = False,
seed_anchor_points: Optional[int] = None,
multi_task_model: bool = False
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Define and optimize the acquisition functions for a constrained problem."""
# Define acquisition function
if not multi_task_model:
if noisy_acquisition:
expected_improvement_cls = (
noisy_con_ei.NoisyConstrainedCausalExpectedImprovement)
else:
expected_improvement_cls = functools.partial(
con_ei.ConstrainedCausalExpectedImprovement, current_global_opt)
else:
if noisy_acquisition:
expected_improvement_cls = (
noisy_multitask_ei.NoisyMultiTaskConstrainedCausalExpectedImprovement)
else:
expected_improvement_cls = functools.partial(
multitask_con_ei.MultiTaskConstrainedCausalExpectedImprovement,
current_global_opt)
acquisition = expected_improvement_cls(task, mean_function, variance_function,
bo_model, bo_model_constraints,
constraints, constraints_dict,
mean_function_constraints,
variance_function_constraints,
exploration_set)
# Get new point
y_acquisition, x_new = optimize_acquisition(
acquisition, intervention_domain, exploration_set, cost_functions,
target, num_anchor_points, sample_anchor_points, seed_anchor_points)
  # Plot the acquisition function if es is one dimensional and verbose is True
if verbose and len(exploration_set) == 1:
# Plot acquisition function. We avoid changing the global seed by
    # storing it and restoring it after the evaluation
old_seed = np.random.get_state()
np.random.seed(0)
# Evaluate improvement and feasibility separately to plot them
limits = [list(tup) for tup in intervention_domain.get_bounds()]
sampled_points = init_utils.create_n_dimensional_intervention_grid(
limits, num_anchor_points)
improvement, probability_feasibility = acquisition.evaluate_to_store(
sampled_points)
plotting_utils.plot_acquisition(
sampled_points,
improvement,
probability_feasibility=probability_feasibility,
multi_task_model=multi_task_model,
x_new=x_new)
np.random.set_state(old_seed)
else:
improvement = np.zeros((num_anchor_points, 1))
probability_feasibility = np.zeros((num_anchor_points, 1))
return y_acquisition, x_new, improvement, probability_feasibility
| ccbo-main | acquisitions/evaluate_acquisitions.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the constrained causal acquisition functions."""
from typing import Callable, Any
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.acquisitions import expected_improvement
from ccbo.utils import utilities
class NoisyCausalExpectedImprovement(
expected_improvement.CausalExpectedImprovement
):
"""Causal expected improvement acquisition function."""
def __init__(
self,
task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: gpy_model_wrappers.GPyModelWrapper,
previous_variance: float = 1.0,
jitter: float = 0.0,
n_samples: int = 10
) -> None:
base_args = {
"current_global_opt": None,
"task": task,
"mean_function": mean_function,
"variance_function": variance_function,
"previous_variance": previous_variance,
"jitter": jitter,
"model": model}
expected_improvement.CausalExpectedImprovement.__init__(self, **base_args)
# How many samples to get from the given model to compute the current global
# optimum and then evaluate the improvement
self.n_samples = n_samples
def get_global_opt(self,
mean: np.ndarray,
standard_deviation: np.ndarray,
task: Any,
*unused_args) -> float:
"""Get one value of global optimum by sampling."""
return task(np.random.normal(mean, standard_deviation))
def get_improvement(self, x: np.ndarray) -> np.ndarray:
"""Compute the expected improvement."""
out = []
for _ in range(self.n_samples):
mean, standard_deviation = self.get_mean_std_for_improvement(x)
current_global_opt = self.get_global_opt(mean, standard_deviation,
utilities.EVAL_FN[self.task], x)
improvement = self.get_improvement_for_current_opt(
current_global_opt, mean, standard_deviation)
if self.task.value == utilities.Task.MAX.value:
improvement *= -1
out.append(improvement)
return np.mean(np.hstack(out), axis=1)
| ccbo-main | acquisitions/noisy_expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the constrained causal acquisition functions."""
from typing import Callable, Tuple, Optional, Any
from emukit.core import acquisition as acq
from emukit.model_wrappers import gpy_model_wrappers
import numpy as np
from ccbo.utils import utilities
class CausalExpectedImprovement(acq.Acquisition):
"""Causal expected improvement acquisition function."""
def __init__(
self,
current_global_opt: Optional[float],
task: utilities.Task,
mean_function: Callable[[np.ndarray], np.ndarray],
variance_function: Callable[[np.ndarray], np.ndarray],
model: gpy_model_wrappers.GPyModelWrapper,
previous_variance: float = 1.0,
jitter: float = 0.0
) -> None:
self.model = model
self.mean_function = mean_function
self.previous_variance = previous_variance
self.variance_function = variance_function
self.jitter = jitter
self.current_global_opt = current_global_opt
self.task = task
self.gradients = False
def get_improvement(self, x: np.ndarray) -> np.ndarray:
"""Compute the expected improvement."""
mean, standard_deviation = self.get_mean_std_for_improvement(x)
improvement = self.get_improvement_for_current_opt(
self.current_global_opt, mean, standard_deviation)
if self.task.value == utilities.Task.MAX.value:
improvement *= -1
return improvement
def get_mean_std_for_improvement(
self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Get mean and std at x to be used to compute the expected improvement."""
if self.model:
mean, variance = self.model.predict(x)
else:
mean = self.mean_function(x)
variance = self.previous_variance * np.ones(
(x.shape[0], 1)) + self.variance_function(x)
    # Variance computed with Monte Carlo leads to numerical instability.
    # This ensures that negative or NaN values are not generated.
if np.any(np.isnan(variance)):
variance[np.isnan(variance)] = 0
elif np.any(variance < 0):
variance = variance.clip(0.0001)
standard_deviation = np.sqrt(variance)
mean += self.jitter
return mean, standard_deviation
def get_improvement_for_current_opt(
self,
current_global_opt: float,
mean: np.ndarray,
standard_deviation: np.ndarray,
) -> np.ndarray:
"""Get improvement wrt the given global minimum and with given params."""
u, pdf, cdf = utilities.get_standard_normal_pdf_cdf(
current_global_opt, mean, standard_deviation
)
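    # For a minimization task this is the closed-form expected improvement
    # E[max(0, eta - f(x))] under the Gaussian posterior:
    #   EI(x) = sigma(x) * (u * Phi(u) + phi(u)),  u = (eta - mu(x)) / sigma(x),
    # where eta is the incumbent optimum passed as current_global_opt.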
return standard_deviation * (u * cdf + pdf)
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""Evaluate the causal EI."""
return self.get_improvement(x)
def evaluate_with_gradients(
self, x: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes the causal EI and its derivative."""
raise NotImplementedError("Not implemented for this class.")
def evaluate_to_store(self, x: np.ndarray) -> Any:
raise NotImplementedError("Not implemented for this class.")
@property
def has_gradients(self) -> bool:
"""Returns that this acquisition does not have gradients."""
# This function is needed to comply with emukit requirements
return self.gradients
| ccbo-main | acquisitions/expected_improvement.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""scm_examples init."""
| ccbo-main | scm_examples/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define a toy graph.
Define the variables and the SCM for a toy graph example.
"""
from __future__ import annotations
import collections
from typing import Any, Dict, List, Optional
from networkx.classes import multidigraph
from networkx.drawing import nx_agraph
import pygraphviz
from ccbo.scm_examples import base
from ccbo.utils import graph_functions
from ccbo.utils import utilities
class Scm(base.ScmExample):
"""Basic SCM."""
def __init__(
self,
variables: Optional[Dict[str, List[Any]]] = None,
constraints: Optional[Dict[str, List[Any]]] = None,
scm_funcs: Optional[collections.OrderedDict[str, Any]] = None,
graph: Optional[multidigraph.MultiDiGraph] = None):
if variables is None:
variables = {
"X": ["m", [-4, 1]],
"Z": ["m", [-3, 3]],
"Y": ["t"],
}
if constraints is None:
constraints = {
"X": [utilities.Direction.LOWER, 1],
"Z": [utilities.Direction.HIGHER, 1]
}
if scm_funcs is None:
scm_funcs = self.scm_funcs()
if graph is None:
graph = self.graph()
args = {
"variables": variables,
"constraints": constraints,
"scm_funcs": scm_funcs,
"graph": graph
}
super().__init__(**args)
def scm_funcs(self) -> collections.OrderedDict[str, Any]:
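    # Structural equations of the toy SCM (U_* denote exogenous noise terms):
    #   X := U_X
    #   Z := 2 * X + U_Z
    #   Y := -Z + U_Y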
x = lambda noise, sample: noise
z = lambda noise, sample: 2. * sample["X"] + noise
y = lambda noise, sample: -1 * sample["Z"] + noise
return collections.OrderedDict([("X", x), ("Z", z), ("Y", y)])
def graph(self) -> multidigraph.MultiDiGraph:
"""Define graph topology."""
dag_view = graph_functions.make_graphical_model(
topology="dependent", nodes=["X", "Z", "Y"], verbose=True)
dag = nx_agraph.from_agraph(pygraphviz.AGraph(dag_view.source))
return dag
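# A minimal usage sketch (illustrative only, not part of the original module):
# instantiate the toy SCM and call the inherited `setup` to obtain exploration
# sets, intervention domains and the ground-truth (un)constrained optima.
#
#   toy_scm = Scm()
#   (manipulative_variables, exploration_sets, intervention_domain, all_ce,
#    grids, constraints_values, optimal_unconstrained_y, optimal_y, opt_level,
#    optimal_set) = toy_scm.setup(n_grid_points=100, n_samples=1)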
| ccbo-main | scm_examples/scm.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define a base class for all the DAG examples."""
from __future__ import annotations
import collections
import copy
from typing import Any, Dict, Optional, Tuple
from networkx.classes import multidigraph
import numpy as np
from ccbo.utils import constraints_functions
from ccbo.utils import initialisation_utils
from ccbo.utils import intervention_functions
from ccbo.utils import utilities
class ScmExample:
"""Base class for the SCM examples."""
def __init__(self, variables: Dict[str, list[Any]],
constraints: Dict[str, list[Any]],
scm_funcs: collections.OrderedDict[str, Any],
graph: multidigraph.MultiDiGraph):
self.variables = variables
self.constraints = constraints
self.scm_funcs = scm_funcs
self.graph = graph
def get_target_name(self) -> str:
# Get the name of the target variable as a string
target_variable = [
key for key, values in self.variables.items()
if values[0] == utilities.VariableType.TARGET.value
][0]
return target_variable
def setup(
self,
exploration_sets: Optional[list[Tuple[str, ...]]] = None,
n_grid_points: int = 100,
n_samples: int = 1,
sampling_seed: Optional[int] = 1,
task: utilities.Task = utilities.Task.MIN,
) -> Tuple[list[str], list[Tuple[str, ...]], Dict[str, Any], Any,
Dict[Tuple[str, ...], Optional[np.ndarray[Any, np.dtype]]],
Dict[str, Dict[str, list[float]]], Any, Any, Any, Tuple[str, ...]]:
"""Setup variables and dictionaries needed for optimization."""
dict_variables = self.variables
scm_funcs = self.scm_funcs
constraints = self.constraints
# Get set of variables
target_variable = self.get_target_name()
manipulative_variables = [
key for key, values in dict_variables.items()
if values[0] == utilities.VariableType.MANIPULATIVE.value
]
protected_variables = list(constraints.keys())
# Get graph structure
graph = self.graph
# Specify all the exploration sets based on the manipulative variables
if exploration_sets is None:
exploration_sets = list(
initialisation_utils.powerset(manipulative_variables))
# Specify the intervention domain for each variable
intervention_domain = {
key: dict_variables[key][1] for key in manipulative_variables
}
    # Set up a grid for each exploration set and use it to find the best
    # intervention value.
interventional_grids = initialisation_utils.get_interventional_grids(
exploration_sets,
intervention_domain,
size_intervention_grid=n_grid_points,
)
# Compute unconstrained ground truth optimal interventions
    _, _, optimal_unconstrained_y, _, _, all_ce = (
intervention_functions.get_optimal_interventions(
exploration_sets=exploration_sets,
interventional_grids=interventional_grids,
scm_funcs=scm_funcs,
graph=graph,
model_variables=list(dict_variables.keys()),
target_variable=target_variable,
n_samples=n_samples,
sampling_seed=sampling_seed,
)
)
    # Store the initial interventional grid and all_ce before modifying them
    # to find the constrained solution.
complete_interventional_grid = copy.deepcopy(interventional_grids)
complete_all_ce = copy.deepcopy(all_ce)
    # Get the number of constraints and the constrained variables for each
    # intervention set.
constraints_dict = constraints_functions.get_constraints_dict(
exploration_sets, protected_variables, target_variable, graph)
# Compute constraints functions on the interventional grids
constraints_values = constraints_functions.compute_constraints_functions(
exploration_sets,
constraints_dict,
interventional_grids,
scm_funcs,
graph,
dict_variables,
n_samples=n_samples,
sampling_seed=sampling_seed)
# Find optimal constrained output value
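    # The loop below repeatedly takes the best candidate on the grid and, if it
    # violates a constraint, removes it from the candidate set before searching
    # again, so it terminates at the best feasible intervention.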
is_feasible = False
while not is_feasible:
(is_feasible, optimal_set, opt_level,
optimal_y) = constraints_functions.verify_feasibility(
exploration_sets,
all_ce,
constraints,
constraints_dict,
scm_funcs,
graph,
dict_variables,
interventional_grids,
n_samples=n_samples,
sampling_seed=sampling_seed,
task=task)
if not is_feasible:
        # Remove the infeasible target value and the corresponding
        # intervention level from the interventional grid.
all_ce[optimal_set].remove(optimal_y)
condition = True
for i in range(opt_level.shape[0]):
condition = condition & (
interventional_grids[optimal_set][:, i] == opt_level[i])
interventional_grids[optimal_set] = interventional_grids[optimal_set][
~condition]
return (manipulative_variables, exploration_sets, intervention_domain,
complete_all_ce, complete_interventional_grid, constraints_values,
            optimal_unconstrained_y, optimal_y, opt_level, optimal_set)
| ccbo-main | scm_examples/base.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
from setuptools import find_namespace_packages
from setuptools import setup
def _get_version():
with open('jraph/__init__.py') as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=')+1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `jraph/__init__.py`')
setup(
name='jraph',
version=_get_version(),
url='https://github.com/deepmind/jraph',
license='Apache 2.0',
author='DeepMind',
description=('Jraph: A library for Graph Neural Networks in Jax'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='jax graph neural networks python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
package_data={'jraph': ['ogb_examples/test_data/*']},
python_requires='>=3.6',
install_requires=[
'jax>=0.1.55',
'jaxlib>=0.1.37',
'numpy>=1.18.0',
],
extras_require={'examples': ['dm-haiku>=0.0.2', 'absl-py>=0.9',
'frozendict>=2.0.2', 'optax>=0.0.1',
'scipy>=1.2.1'],
'ogb_examples': ['dm-haiku>=0.0.2', 'absl-py>=0.9',
'optax>=0.0.1', 'pandas>=1.0.5',
'dm-tree>=0.1.5']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| jraph-master | setup.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import inspect
import os
import sys
import typing
import jraph
def _add_annotations_import(path):
"""Appends a future annotations import to the file at the given path."""
with open(path) as f:
contents = f.read()
# If we run sphinx multiple times then we will append the future import
# multiple times too, so this check is here to prevent that.
if contents.startswith('from __future__ import annotations'):
return
assert contents.startswith('#'), (path, contents.split('\n')[0])
with open(path, 'w') as f:
    # NOTE: This is subtle and not unit tested: we prefix the first line of
    # each Python file with this future import. It is important to prefix the
    # existing line rather than insert a new line, so that source code
    # locations stay accurate (we link to GitHub). The assertion above ensures
    # that the first line in the file is a comment, so it is safe to prefix it.
f.write('from __future__ import annotations ')
f.write(contents)
def _recursive_add_annotations_import():
for path, _, files in os.walk('../jraph/'):
for file in files:
if file.endswith('.py'):
_add_annotations_import(os.path.abspath(os.path.join(path, file)))
if 'READTHEDOCS' in os.environ:
_recursive_add_annotations_import()
typing.get_type_hints = lambda obj, *unused: obj.__annotations__
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
# -- Project information -----------------------------------------------------
project = 'Jraph'
copyright = '2021, Jraph Authors' # pylint: disable=redefined-builtin
author = 'Jraph Authors'
# The full version, including alpha/beta/rc tags
release = '0.0.1.dev'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex',
'sphinx_autodoc_typehints',
]
pygments_style = 'sphinx'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
except ImportError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
return 'https://github.com/deepmind/jraph/blob/master/jraph/%s#L%d#L%d' % (
os.path.relpath(filename, start=os.path.dirname(
jraph.__file__)), lineno, lineno + len(source) - 1)
| jraph-master | docs/conf.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jraph."""
from jraph._src.graph import GraphsTuple
from jraph._src.models import AggregateEdgesToGlobalsFn
from jraph._src.models import AggregateEdgesToNodesFn
from jraph._src.models import AggregateNodesToGlobalsFn
from jraph._src.models import AttentionLogitFn
from jraph._src.models import AttentionReduceFn
from jraph._src.models import DeepSets
from jraph._src.models import EmbedEdgeFn
from jraph._src.models import EmbedGlobalFn
from jraph._src.models import EmbedNodeFn
from jraph._src.models import GAT
from jraph._src.models import GATAttentionLogitFn
from jraph._src.models import GATAttentionQueryFn
from jraph._src.models import GATNodeUpdateFn
from jraph._src.models import GNUpdateEdgeFn
from jraph._src.models import GNUpdateGlobalFn
from jraph._src.models import GNUpdateNodeFn
from jraph._src.models import GraphConvolution
from jraph._src.models import GraphMapFeatures
from jraph._src.models import GraphNetGAT
from jraph._src.models import GraphNetwork
from jraph._src.models import InteractionNetwork
from jraph._src.models import InteractionUpdateEdgeFn
from jraph._src.models import InteractionUpdateNodeFn
from jraph._src.models import NodeFeatures
from jraph._src.models import RelationNetwork
from jraph._src.utils import ArrayTree
from jraph._src.utils import batch
from jraph._src.utils import batch_np
from jraph._src.utils import concatenated_args
from jraph._src.utils import dynamically_batch
from jraph._src.utils import get_edge_padding_mask
from jraph._src.utils import get_fully_connected_graph
from jraph._src.utils import get_graph_padding_mask
from jraph._src.utils import get_node_padding_mask
from jraph._src.utils import get_number_of_padding_with_graphs_edges
from jraph._src.utils import get_number_of_padding_with_graphs_graphs
from jraph._src.utils import get_number_of_padding_with_graphs_nodes
from jraph._src.utils import pad_with_graphs
from jraph._src.utils import partition_softmax
from jraph._src.utils import segment_max
from jraph._src.utils import segment_max_or_constant
from jraph._src.utils import segment_mean
from jraph._src.utils import segment_min
from jraph._src.utils import segment_min_or_constant
from jraph._src.utils import segment_normalize
from jraph._src.utils import segment_softmax
from jraph._src.utils import segment_sum
from jraph._src.utils import segment_variance
from jraph._src.utils import sparse_matrix_to_graphs_tuple
from jraph._src.utils import unbatch
from jraph._src.utils import unbatch_np
from jraph._src.utils import unpad_with_graphs
from jraph._src.utils import with_zero_out_padding_outputs
from jraph._src.utils import zero_out_padding
__version__ = "0.0.6.dev0"
__all__ = ("ArrayTree", "DeepSets", "GraphConvolution", "GraphMapFeatures",
"InteractionNetwork", "RelationNetwork", "GraphNetGAT", "GAT",
"GraphsTuple", "GraphNetwork", "NodeFeatures",
"AggregateEdgesToNodesFn", "AggregateNodesToGlobalsFn",
"AggregateEdgesToGlobalsFn", "AttentionLogitFn", "AttentionReduceFn",
"GNUpdateEdgeFn", "GNUpdateNodeFn", "GNUpdateGlobalFn",
"InteractionUpdateNodeFn", "InteractionUpdateEdgeFn", "EmbedEdgeFn",
"EmbedNodeFn", "EmbedGlobalFn", "GATAttentionQueryFn",
"GATAttentionLogitFn", "GATNodeUpdateFn", "batch", "batch_np",
"unbatch", "unbatch_np", "pad_with_graphs",
"get_number_of_padding_with_graphs_graphs",
"get_number_of_padding_with_graphs_nodes",
"get_number_of_padding_with_graphs_edges", "unpad_with_graphs",
"get_node_padding_mask", "get_edge_padding_mask",
"get_graph_padding_mask", "segment_max", "segment_max_or_constant",
"segment_min_or_constant", "segment_softmax", "segment_sum",
"partition_softmax", "concatenated_args",
"get_fully_connected_graph", "dynamically_batch",
"with_zero_out_padding_outputs", "zero_out_padding",
"sparse_matrix_to_graphs_tuple")
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Jraph public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
try:
del _src # pylint: disable=undefined-variable
except NameError:
pass
| jraph-master | jraph/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sharded graphnet."""
import functools
import os
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax.lib import xla_bridge
import jax.tree_util as tree
import jraph
from jraph._src import utils
from jraph.experimental import sharded_graphnet
import numpy as np
def _get_graphs_from_n_edge(n_edge):
"""Get a graphs tuple from n_edge."""
graphs = []
for el in n_edge:
graphs.append(
jraph.GraphsTuple(
nodes=np.random.uniform(size=(128, 2)),
edges=np.random.uniform(size=(el, 2)),
senders=np.random.choice(128, el),
receivers=np.random.choice(128, el),
n_edge=np.array([el]),
n_node=np.array([128]),
globals=np.array([[el]]),
))
graphs = utils.batch_np(graphs)
return graphs
def get_graphs_tuples(n_edge, sharded_n_edge, device_graph_idx):
sharded_n_edge = np.array(sharded_n_edge)
device_graph_idx = np.array(device_graph_idx)
devices = len(sharded_n_edge)
graphs = _get_graphs_from_n_edge(n_edge)
sharded_senders = np.reshape(graphs.senders, [devices, -1])
sharded_receivers = np.reshape(graphs.receivers, [devices, -1])
sharded_edges = np.reshape(graphs.edges,
[devices, -1, graphs.edges.shape[-1]])
# Broadcast replicated features to have a devices leading axis.
broadcast = lambda x: np.broadcast_to(x[None, :], [devices] + list(x.shape))
sharded_graphs = sharded_graphnet.ShardedEdgesGraphsTuple(
device_senders=sharded_senders,
device_receivers=sharded_receivers,
device_edges=sharded_edges,
device_n_edge=sharded_n_edge,
nodes=broadcast(graphs.nodes),
senders=broadcast(graphs.senders),
receivers=broadcast(graphs.receivers),
device_graph_idx=device_graph_idx,
globals=broadcast(graphs.globals),
n_node=broadcast(graphs.n_node),
n_edge=broadcast(graphs.n_edge))
return graphs, sharded_graphs
class ShardedGraphnetTest(parameterized.TestCase):
def setUp(self):
super().setUp()
os.environ[
'XLA_FLAGS'] = '--xla_force_host_platform_device_count=3'
xla_bridge.get_backend.cache_clear()
@parameterized.named_parameters(
('split_3_to_4', [3, 5, 4], [[3, 3], [2, 4]], [[0, 1], [1, 2]]),
('split_zero_last_edge', [1, 2, 5, 4], [[1, 2, 3], [2, 4, 0]
], [[0, 1, 2], [2, 3, 0]]),
('split_one_over_multiple', [1, 11], [[1, 3], [4, 0], [4, 0]
], [[0, 1], [1, 0], [1, 0]]))
def test_get_sharded_graphs_tuple(self, n_edge, sharded_n_edge,
device_graph_idx):
in_tuple, expect_tuple = get_graphs_tuples(n_edge, sharded_n_edge,
device_graph_idx)
out_tuple = sharded_graphnet.graphs_tuple_to_broadcasted_sharded_graphs_tuple(
in_tuple, num_shards=len(expect_tuple.nodes))
tree.tree_map(np.testing.assert_almost_equal, out_tuple, expect_tuple)
@parameterized.named_parameters(
('split_intermediate', [3, 5, 4, 3, 3]),
('split_zero_last_edge', [1, 2, 5, 4, 6]),
('split_one_over_multiple', [1, 11]))
def test_sharded_same_as_non_sharded(self, n_edge):
in_tuple = _get_graphs_from_n_edge(n_edge)
devices = 3
sharded_tuple = sharded_graphnet.graphs_tuple_to_broadcasted_sharded_graphs_tuple(
in_tuple, devices)
update_fn = jraph.concatenated_args(lambda x: x)
sharded_gn = sharded_graphnet.ShardedEdgesGraphNetwork(
update_fn, update_fn, update_fn, num_shards=devices)
gn = jraph.GraphNetwork(update_fn, update_fn, update_fn)
sharded_out = jax.pmap(sharded_gn, axis_name='i')(sharded_tuple)
expected_out = gn(in_tuple)
reduced_out = sharded_graphnet.broadcasted_sharded_graphs_tuple_to_graphs_tuple(
sharded_out)
jax.tree_util.tree_map(
functools.partial(np.testing.assert_allclose, atol=1E-5, rtol=1E-5),
expected_out, reduced_out)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/experimental/sharded_graphnet_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sharded (Data Parallel) Graph Nets."""
import functools
from typing import Callable, List, NamedTuple, Optional
import jax
import jax.numpy as jnp
import jax.tree_util as tree
import jraph
from jraph._src import graph as gn_graph
from jraph._src import utils
import numpy as np
class ShardedEdgesGraphsTuple(NamedTuple):
"""A `GraphsTuple` for use with `ShardedEdgesGraphNetwork`.
NOTES:
- A ShardedEdgesGraphNetwork is for use with `jax.pmap`. As such, it will have
a leading axis of size `num_devices` on the host, but no such axis on
device. Non-sharded data is replicated on each device. To achieve this with
`jax.pmap` you can broadcast non-sharded data to have leading axis
'num_devices' or use the 'in_axes' parameter, which will indicate which
attributes should be replicated and which should not. Current helper methods
use the first approach.
  - It is recommended that you construct ShardedEdgesGraphsTuples with
    `graphs_tuple_to_broadcasted_sharded_graphs_tuple`.
The values of `nodes`, `device_edges` and `globals` can be gn_graph.ArrayTree
- nests of features with `jax` compatible values. For example, `nodes` in a
graph may have more than one type of attribute.
However, the ShardedEdgesGraphsTuple typically takes the following form for a
batch of `n` graphs:
- n_node: The number of nodes per graph. It is a vector of integers with shape
`[n_graphs]`, such that `graph.n_node[i]` is the number of nodes in the i-th
graph.
- n_edge: The number of edges per graph. It is a vector of integers with shape
`[n_graphs]`, such that `graph.n_edge[i]` is the number of edges in the i-th
graph.
- nodes: The nodes features. It is either `None` (the graph has no node
features), or a vector of shape `[n_nodes] + node_shape`, where
`n_nodes = sum(graph.n_node)` is the total number of nodes in the batch of
graphs, and `node_shape` represents the shape of the features of each node.
The relative index of a node from the batched version can be recovered from
the `graph.n_node` property. For instance, the second node of the third
graph will have its features in the
`1 + graph.n_node[0] + graph.n_node[1]`-th slot of graph.nodes.
Observe that having a `None` value for this field does not mean that the
graphs have no nodes, only that they do not have node features.
- receivers: The indices of the receiver nodes, for each edge. It is either
`None` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that `graph.receivers[i]` is the index of the node
receiving from the i-th edge.
Observe that the index is absolute (in other words, cumulative), i.e.
`graphs.receivers` take value in `[0, n_nodes]`. For instance, an edge
connecting the vertices with relative indices 2 and 3 in the second graph of
the batch would have a `receivers` value of `3 + graph.n_node[0]`.
If `graphs.receivers` is `None`, then `graphs.edges` and `graphs.senders`
should also be `None`.
- senders: The indices of the sender nodes, for each edge. It is either
`None` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that `graph.senders[i]` is the index of the node
sending from the i-th edge.
Observe that the index is absolute, i.e. `graphs.senders` take value in
`[0, n_nodes]`. For instance, an edge connecting the vertices with relative
indices 1 and 3 in the third graph of the batch would have a `senders` value
of `1 + graph.n_node[0] + graph.n_node[1]`.
If `graphs.senders` is `None`, then `graphs.edges` and `graphs.receivers`
should also be `None`.
- globals: The global features of the graph. It is either `None` (the graph
has no global features), or a vector of shape `[n_graphs] + global_shape`
representing graph level features.
The ShardedEdgesGraphsTuple also contains device-local attributes that are
used for data parallel computation. On the host, each of these attributes will
have an additional leading axis of shape `num_devices` for use with
  `jax.pmap`, but this is omitted in the following documentation.
- device_edges: The subset of the edge features that are on the device.
It is either `None` (the graph has no edge features), or a vector of
shape `[num_edges / num_devices] + edge_shape`
Observe that having a `None` value for this field does not mean that the
graph has no edges, only that they do not have edge features.
- device_senders: The sender indices of edges on device. This is of length
num_edges / num_devices.
- device_receivers: The receiver indices of edge on device. This is of length
num_edges / num_devices.
- device_n_edge: The graph partitions of the edges on device. For example,
say that there are 2 graphs in the original graphs tuple, with n_edge
[1, 11], which has been split over 3 devices. The `device_n_edge`s would
be [[1, 3], [4, 0], [4, 0]]. `0` valued entries that are padding values or
graphs with zero edges are not distinguished. Since these attributes are
used only for `repeat` purposes, the difference makes no difference to
the implementation.
- device_graph_idx: The indices of the graphs on device. For example, say
that there are 5 graphs in the original graphs tuple, and these has been
split over 3 devices, the device_graphs_idxs could be
[[0, 1, 2], [2, 3, 0], [3, 4, 0]]. In this splitting, the third graph
is split over 2 devices. If a `0` is the first in `device_graph_idx` then
that indicates the first graph, otherwise it indicates a padding value.
"""
nodes: gn_graph.ArrayTree
device_edges: gn_graph.ArrayTree
device_receivers: jnp.ndarray # with integer dtype
device_senders: jnp.ndarray # with integer dtype
receivers: jnp.ndarray # with integer dtype
senders: jnp.ndarray # with integer dtype
globals: gn_graph.ArrayTree
device_n_edge: jnp.ndarray # with integer dtype
n_node: jnp.ndarray # with integer dtype
n_edge: jnp.ndarray # with integer dtype
device_graph_idx: jnp.ndarray # with integer dtype
def graphs_tuple_to_broadcasted_sharded_graphs_tuple(
graphs_tuple: jraph.GraphsTuple,
num_shards: int) -> ShardedEdgesGraphsTuple:
"""Converts a `GraphsTuple` to a `ShardedEdgesGraphsTuple` to use with `pmap`.
For a given number of shards this will compute device-local edge and graph
attributes, and add a batch axis of size num_shards. You can then use
`ShardedEdgesGraphNetwork` with `jax.pmap`.
Args:
graphs_tuple: The `GraphsTuple` to be converted to a sharded `GraphsTuple`.
num_shards: The number of devices to shard over.
Returns:
A ShardedEdgesGraphsTuple over the number of shards.
"""
# Note: this is not jittable, so to prevent using a device by accident,
# this is all happening in numpy.
nodes, edges, receivers, senders, globals_, n_node, n_edge = graphs_tuple
if np.sum(n_edge) % num_shards != 0:
raise ValueError(('The number of edges in a `graph.GraphsTuple` must be '
'divisible by the number of devices per replica.'))
if np.sum(np.array(n_edge)) == 0:
raise ValueError('The input `Graphstuple` must have edges.')
# Broadcast replicated features to have a `num_shards` leading axis.
# pylint: disable=g-long-lambda
broadcast = lambda x: np.broadcast_to(x[None, :], (num_shards,) + x.shape)
# pylint: enable=g-long-lambda
# `edges` will be straightforwardly sharded, with 1/num_shards of
# the edges on each device.
def shard_edges(edge_features):
return np.reshape(edge_features, (num_shards, -1) + edge_features.shape[1:])
edges = jax.tree_map(shard_edges, edges)
# Our sharded strategy is by edges - which means we need a device local
# n_edge, senders and receivers to do global aggregations.
# Senders and receivers are easy - 1/num_shards per device.
device_senders = shard_edges(senders)
device_receivers = shard_edges(receivers)
# n_edge is a bit more difficult. Let's say we have a graphs tuple with
# n_edge [2, 8], and we want to distribute this on two devices. Then
# we will have sharded the edges to [5, 5], so the n_edge per device will be
# [2,3], and [5]. Since we need to have each of the n_edge the same shape,
# we will need to pad this to [5,0]. This is a bit dangerous, as the zero
# here has a different meaning to a graph with zero edges, but we need the
# zero for the global broadcasting to be correct for aggregation. Since
# this will only be used in the first instance for global broadcasting on
# device I think this is ok, but ideally we'd have a more elegant solution.
# TODO(jonathangodwin): think of a more elegant solution.
edges_per_device = np.sum(n_edge) // num_shards
edges_in_current_split = 0
completed_splits = []
current_split = {'n_edge': [], 'device_graph_idx': []}
for device_graph_idx, x in enumerate(n_edge):
new_edges_in_current_split = edges_in_current_split + x
if new_edges_in_current_split > edges_per_device:
# A single graph may be spread across multiple replicas, so here we
# iteratively create new splits until the graph is exhausted.
# How many edges we are trying to allocate.
carry = x
# How much room there is in the current split for new edges.
space_in_current_split = edges_per_device - edges_in_current_split
while carry > 0:
if carry >= space_in_current_split:
# We've encountered a situation where we need to split a graph across
# >= 2 devices. We compute the number we will carry to the next split,
# and add a full split.
carry = carry - space_in_current_split
# Add the left edges to the current split, and complete the split
# by adding it to completed_splits.
current_split['n_edge'].append(space_in_current_split)
current_split['device_graph_idx'].append(device_graph_idx)
completed_splits.append(current_split)
# reset the split
current_split = {'n_edge': [], 'device_graph_idx': []}
space_in_current_split = edges_per_device
edges_in_current_split = 0
else:
current_split = {
'n_edge': [carry],
'device_graph_idx': [device_graph_idx]
}
edges_in_current_split = carry
carry = 0
# Since the total number of edges must be divisible by the number
# of devices, this code path can only be executed for an intermediate
# graph, thus it is not a complete split and we never need to add it
# to `completed splits`.
else:
# Add the edges and globals to the current split.
current_split['n_edge'].append(x)
current_split['device_graph_idx'].append(device_graph_idx)
# If we've reached the end of a split, complete it and start a new one.
if new_edges_in_current_split == edges_per_device:
completed_splits.append(current_split)
current_split = {'n_edge': [], 'device_graph_idx': []}
edges_in_current_split = 0
else:
edges_in_current_split = new_edges_in_current_split
# Flatten list of dicts to dict of lists.
completed_splits = {
k: [d[k] for d in completed_splits] for k in completed_splits[0]
}
pad_split_to = max([len(x) for x in completed_splits['n_edge']])
pad = lambda x: np.pad(x, (0, pad_split_to - len(x)), mode='constant')
device_n_edge = np.array([pad(x) for x in completed_splits['n_edge']])
device_graph_idx = np.array(
[pad(x) for x in completed_splits['device_graph_idx']])
return ShardedEdgesGraphsTuple(
nodes=jax.tree_map(broadcast, nodes),
device_edges=edges,
device_receivers=device_receivers,
device_senders=device_senders,
receivers=broadcast(receivers),
senders=broadcast(senders),
device_graph_idx=device_graph_idx,
globals=jax.tree_map(broadcast, globals_),
n_node=broadcast(n_node),
n_edge=broadcast(n_edge),
device_n_edge=device_n_edge)
def broadcasted_sharded_graphs_tuple_to_graphs_tuple(sharded_graphs_tuple):
"""Converts a broadcasted ShardedGraphsTuple to a GraphsTuple."""
# We index the first element of replicated arrays, since they have been
# repeated. For edges, we reshape to recover all of the edge features.
unbroadcast = lambda y: tree.tree_map(lambda x: x[0], y)
unshard = lambda x: jnp.reshape(x, (x.shape[0] * x.shape[1],) + x.shape[2:])
# TODO(jonathangodwin): check senders and receivers are consistent.
return jraph.GraphsTuple(
nodes=unbroadcast(sharded_graphs_tuple.nodes),
edges=tree.tree_map(unshard, sharded_graphs_tuple.device_edges),
n_node=sharded_graphs_tuple.n_node[0],
n_edge=sharded_graphs_tuple.n_edge[0],
globals=unbroadcast(sharded_graphs_tuple.globals),
senders=sharded_graphs_tuple.senders[0],
receivers=sharded_graphs_tuple.receivers[0])
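# A round-trip usage sketch (mirroring the pattern in the experimental tests;
# `update_fn` stands in for whatever edge/node/global update functions you
# use):
#
#   num_shards = len(jax.devices())
#   sharded = graphs_tuple_to_broadcasted_sharded_graphs_tuple(
#       graphs_tuple, num_shards)
#   net = ShardedEdgesGraphNetwork(update_fn, update_fn, update_fn,
#                                  num_shards=num_shards)
#   sharded_out = jax.pmap(net, axis_name='i')(sharded)
#   graphs_out = broadcasted_sharded_graphs_tuple_to_graphs_tuple(sharded_out)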
def sharded_segment_sum(data, indices, num_segments, axis_index_groups):
"""Segment sum over data on multiple devices."""
device_segment_sum = utils.segment_sum(data, indices, num_segments)
return jax.lax.psum(
device_segment_sum, axis_name='i', axis_index_groups=axis_index_groups)
ShardedEdgeFeatures = gn_graph.ArrayTree
AggregateShardedEdgesToGlobalsFn = Callable[
[ShardedEdgeFeatures, jnp.ndarray, int, jnp.ndarray], gn_graph.ArrayTree]
AggregateShardedEdgesToNodesFn = Callable[
[gn_graph.ArrayTree, jnp.ndarray, int, List[List[int]]], jraph.NodeFeatures]
# pylint: disable=invalid-name
def ShardedEdgesGraphNetwork(
update_edge_fn: Optional[jraph.GNUpdateEdgeFn],
update_node_fn: Optional[jraph.GNUpdateNodeFn],
update_global_fn: Optional[jraph.GNUpdateGlobalFn] = None,
aggregate_edges_for_nodes_fn:
AggregateShardedEdgesToNodesFn = sharded_segment_sum,
aggregate_nodes_for_globals_fn: jraph.AggregateNodesToGlobalsFn = jax.ops
.segment_sum,
aggregate_edges_for_globals_fn:
AggregateShardedEdgesToGlobalsFn = sharded_segment_sum,
attention_logit_fn: Optional[jraph.AttentionLogitFn] = None,
attention_reduce_fn: Optional[jraph.AttentionReduceFn] = None,
num_shards: int = 1):
"""Returns a method that applies a GraphNetwork on a sharded GraphsTuple.
This GraphNetwork is sharded over `edges`, all other features are assumed
to be replicated on device.
There are two clear use cases for a ShardedEdgesGraphNetwork. The first is
where a single graph can't fit on device. The second is when you are compute
bound on the edge feature calculation, and you'd like to speed up
training/inference by distributing the compute across devices.
Example usage:
```
gn = jax.pmap(ShardedEdgesGraphNetwork(update_edge_function,
update_node_function, **kwargs), axis_name='i')
# Conduct multiple rounds of message passing with the same parameters:
for _ in range(num_message_passing_steps):
sharded_graph = gn(sharded_graph)
```
Args:
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
nodes. This must support cross-device aggregations.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals. This must support cross-device aggregations.
attention_logit_fn: function used to calculate the attention weights or None
to deactivate attention mechanism.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
num_shards: how many devices per replica for sharding.
Returns:
A method that applies the configured GraphNetwork.
"""
not_both_supplied = lambda x, y: (x != y) and ((x is None) or (y is None))
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
devices = jax.devices()
num_devices = len(devices)
assert num_devices % num_shards == 0
num_replicas = num_devices // num_shards
# The IDs within a replica.
replica_ids = list(range(num_devices))
# How the devices are grouped per replica.
axis_groups = [
replica_ids[i * num_shards:(i + 1) * num_shards]
for i in range(num_replicas)
]
def _ApplyGraphNet(graph: ShardedEdgesGraphsTuple) -> ShardedEdgesGraphsTuple:
"""Applies a configured GraphNetwork to a sharded graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
    There is one difference: for the node update, the class aggregates over the
    sender edges and receiver edges separately. This is a bit more general than
    the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, device_edges, device_receivers, device_senders, receivers, senders, globals_, device_n_edge, n_node, n_edge, device_graph_idx = graph
# Equivalent to jnp.sum(n_node), but jittable.
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
sum_device_n_edge = device_senders.shape[0]
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
sent_attributes = tree.tree_map(lambda n: n[device_senders], nodes)
received_attributes = tree.tree_map(lambda n: n[device_receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
global_edge_attributes = tree.tree_map(
lambda g: jnp.repeat(
g[device_graph_idx], device_n_edge, axis=0,
total_repeat_length=sum_device_n_edge),
globals_)
if update_edge_fn:
device_edges = update_edge_fn(device_edges, sent_attributes,
received_attributes, global_edge_attributes)
if attention_logit_fn:
logits = attention_logit_fn(device_edges, sent_attributes,
received_attributes, global_edge_attributes)
tree_calculate_weights = functools.partial(
utils.segment_softmax, segment_ids=receivers, num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
device_edges = attention_reduce_fn(device_edges, weights)
if update_node_fn:
# Aggregations over nodes are assumed to take place over devices
# specified by the axis_groups (e.g. with sharded_segment_sum).
sent_attributes = tree.tree_map(
lambda e: aggregate_edges_for_nodes_fn(e, device_senders, sum_n_node,
axis_groups), device_edges)
received_attributes = tree.tree_map(
lambda e: aggregate_edges_for_nodes_fn(
e, device_receivers, sum_n_node, axis_groups), device_edges)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
global_attributes = tree.tree_map(
lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
nodes = update_node_fn(nodes, sent_attributes, received_attributes,
global_attributes)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
device_graph_idx,
device_n_edge,
axis=0,
total_repeat_length=sum_device_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(n, node_gr_idx, n_graph),
nodes)
      edge_attributes = tree.tree_map(
lambda e: aggregate_edges_for_globals_fn(e, edge_gr_idx, n_graph,
axis_groups), device_edges)
# These pooled nodes are the inputs to the global update fn.
      globals_ = update_global_fn(node_attributes, edge_attributes, globals_)
# pylint: enable=g-long-lambda
return ShardedEdgesGraphsTuple(
nodes=nodes,
device_edges=device_edges,
device_senders=device_senders,
device_receivers=device_receivers,
receivers=receivers,
senders=senders,
device_graph_idx=device_graph_idx,
globals=globals_,
n_node=n_node,
n_edge=n_edge,
device_n_edge=device_n_edge)
return _ApplyGraphNet
# pylint: enable=invalid-name
| jraph-master | jraph/experimental/sharded_graphnet.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jraph.models."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.tree_util as tree
from jraph._src import graph
from jraph._src import models
from jraph._src import utils
import numpy as np
def _get_random_graph(max_n_graph=10):
n_graph = np.random.randint(1, max_n_graph + 1)
n_node = np.random.randint(0, 10, n_graph)
n_edge = np.random.randint(0, 20, n_graph)
# We cannot have any edges if there are no nodes.
n_edge[n_node == 0] = 0
senders = []
receivers = []
offset = 0
for n_node_in_graph, n_edge_in_graph in zip(n_node, n_edge):
if n_edge_in_graph != 0:
senders += list(
np.random.randint(0, n_node_in_graph, n_edge_in_graph) + offset)
receivers += list(
np.random.randint(0, n_node_in_graph, n_edge_in_graph) + offset)
offset += n_node_in_graph
return graph.GraphsTuple(
n_node=jnp.asarray(n_node),
n_edge=jnp.asarray(n_edge),
nodes=jnp.asarray(np.random.random(size=(np.sum(n_node), 4))),
edges=jnp.asarray(np.random.random(size=(np.sum(n_edge), 3))),
globals=jnp.asarray(np.random.random(size=(n_graph, 5))),
senders=jnp.asarray(senders),
receivers=jnp.asarray(receivers))
def _get_graph_network(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_edge_fn = lambda e, sn, rn, g: e
update_global_fn = lambda gn, ge, g: g
net = models.GraphNetwork(update_edge_fn,
update_node_fn,
update_global_fn)
return net(graphs_tuple)
def _get_graph_network_no_global_update(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_edge_fn = lambda e, sn, rn, g: e
update_global_fn = None
net = models.GraphNetwork(update_edge_fn,
update_node_fn,
update_global_fn)
return net(graphs_tuple)
def _get_graph_network_no_node_update(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = None
update_edge_fn = lambda e, sn, rn, g: e
update_global_fn = lambda gn, ge, g: g
net = models.GraphNetwork(update_edge_fn,
update_node_fn,
update_global_fn)
return net(graphs_tuple)
def _get_graph_network_no_edge_update(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_edge_fn = None
update_global_fn = lambda gn, ge, g: g
net = models.GraphNetwork(update_edge_fn,
update_node_fn,
update_global_fn)
return net(graphs_tuple)
def _get_attention_graph_network(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_edge_fn = lambda e, sn, rn, g: e
update_global_fn = lambda gn, ge, g: g
# Our attention logits are just one in this case.
attention_logit_fn = lambda e, sn, rn, g: jnp.array(1.0)
# We use a custom apply function here, which just returns the edge unchanged.
attention_reduce_fn = lambda e, w: e
net = models.GraphNetwork(update_edge_fn,
update_node_fn,
update_global_fn,
attention_logit_fn=attention_logit_fn,
attention_reduce_fn=attention_reduce_fn)
return net(graphs_tuple)
def _get_graph_gat(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_edge_fn = lambda e, sn, rn, g: e
update_global_fn = lambda gn, ge, g: g
# Our attention logits are just one in this case.
attention_logit_fn = lambda e, sn, rn, g: jnp.array(1.0)
# We use a custom apply function here, which just returns the edge unchanged.
attention_reduce_fn = lambda e, w: e
net = models.GraphNetGAT(update_edge_fn,
update_node_fn,
attention_logit_fn,
attention_reduce_fn,
update_global_fn)
return net(graphs_tuple)
def _get_multi_head_attention_graph_network(graphs_tuple):
# Our test update functions are just identity functions.
update_node_fn = lambda n, se, re, g: n
update_global_fn = lambda gn, ge, g: g
# With multi-head attention we have to return multiple edge features.
# Here we define 3 heads, all with the same message.
def update_edge_fn(e, unused_sn, unused_rn, unused_g):
return tree.tree_map(lambda e_: jnp.stack([e_, e_, e_]), e)
# Our attention logits are just the sum of the edge features of each head.
def attention_logit_fn(e, unused_sn, unused_rn, unused_g):
return tree.tree_map(lambda e_: jnp.sum(e_, axis=-1), e)
# For multi-head attention we need a custom apply attention function.
# In this we return the first edge.
def attention_reduce_fn(e, unused_w):
return tree.tree_map(lambda e_: e_[0], e)
net = models.GraphNetwork(jax.vmap(update_edge_fn),
jax.vmap(update_node_fn),
update_global_fn,
attention_logit_fn=jax.vmap(attention_logit_fn),
attention_reduce_fn=jax.vmap(attention_reduce_fn))
return net(graphs_tuple)
def _get_interaction_network(graphs_tuple):
update_node_fn = lambda n, r: jnp.concatenate((n, r), axis=-1)
update_edge_fn = lambda e, s, r: jnp.concatenate((e, s, r), axis=-1)
out = models.InteractionNetwork(update_edge_fn, update_node_fn)(graphs_tuple)
nodes, edges, receivers, senders, _, _, _ = graphs_tuple
expected_edges = jnp.concatenate(
(edges, nodes[senders], nodes[receivers]), axis=-1)
aggregated_nodes = utils.segment_sum(
expected_edges, receivers, num_segments=len(graphs_tuple.nodes))
expected_nodes = jnp.concatenate(
(nodes, aggregated_nodes), axis=-1)
expected_out = graphs_tuple._replace(
edges=expected_edges, nodes=expected_nodes)
return out, expected_out
def _get_graph_independent(graphs_tuple):
embed_fn = lambda x: x * 2
out = models.GraphMapFeatures(embed_fn, embed_fn, embed_fn)(graphs_tuple)
expected_out = graphs_tuple._replace(nodes=graphs_tuple.nodes*2,
edges=graphs_tuple.edges*2,
globals=graphs_tuple.globals*2)
return out, expected_out
def _get_relation_network(graphs_tuple):
edge_fn = lambda s, r: jnp.concatenate((s, r), axis=-1)
global_fn = lambda e: e*2
out = models.RelationNetwork(edge_fn, global_fn)(graphs_tuple)
expected_edges = jnp.concatenate(
(graphs_tuple.nodes[graphs_tuple.senders],
graphs_tuple.nodes[graphs_tuple.receivers]), axis=-1)
num_graphs = len(graphs_tuple.n_edge)
edge_gr_idx = jnp.repeat(jnp.arange(num_graphs),
graphs_tuple.n_edge,
total_repeat_length=graphs_tuple.edges.shape[0])
aggregated_edges = utils.segment_sum(
expected_edges, edge_gr_idx, num_segments=num_graphs)
expected_out = graphs_tuple._replace(
edges=expected_edges, globals=aggregated_edges*2)
return out, expected_out
def _get_deep_sets(graphs_tuple):
node_fn = lambda n, g: jnp.concatenate((n, g), axis=-1)
global_fn = lambda n: n*2
out = models.DeepSets(node_fn, global_fn)(graphs_tuple)
num_graphs = len(graphs_tuple.n_node)
num_nodes = len(graphs_tuple.nodes)
broadcasted_globals = jnp.repeat(graphs_tuple.globals, graphs_tuple.n_node,
total_repeat_length=num_nodes, axis=0)
expected_nodes = jnp.concatenate(
(graphs_tuple.nodes, broadcasted_globals), axis=-1)
node_gr_idx = jnp.repeat(jnp.arange(num_graphs),
graphs_tuple.n_node,
total_repeat_length=num_nodes)
expected_out = graphs_tuple._replace(
nodes=expected_nodes,
globals=utils.segment_sum(
expected_nodes, node_gr_idx, num_segments=num_graphs)*2)
return out, expected_out
def _get_gat(graphs_tuple):
# With multi-head attention we have to return multiple edge features.
# Here we define 3 heads, all with the same message.
def attention_query_fn(n):
return tree.tree_map(lambda n_: jnp.stack([n_, n_, n_], axis=2), n)
  # Our attention logits are 1 for self edges and -1e10 otherwise.
def attention_logit_fn(s, r, e_):
del e_
return (s == r)*1 + (s != r)*-1e10
def node_update_fn(nodes):
return jnp.mean(nodes, axis=2)
net = models.GAT(attention_query_fn, attention_logit_fn, node_update_fn)
# Cast nodes to floats since GAT will output floats from the softmax
# attention.
graphs_tuple = graphs_tuple._replace(
nodes=jnp.array(graphs_tuple.nodes, jnp.float32))
return net(graphs_tuple), graphs_tuple
class ModelsTest(parameterized.TestCase):
def _make_nest(self, array):
"""Returns a nest given an array."""
return {'a': array,
'b': [jnp.ones_like(array), {'c': jnp.zeros_like(array)}]}
def _get_list_and_batched_graph(self):
"""Returns a list of individual graphs and a batched version.
This test-case includes the following corner-cases:
- single node,
- multiple nodes,
- no edges,
- single edge,
- and multiple edges.
"""
batched_graph = graph.GraphsTuple(
n_node=jnp.array([1, 3, 1, 0, 2, 0, 0]),
n_edge=jnp.array([2, 5, 0, 0, 1, 0, 0]),
nodes=self._make_nest(jnp.arange(14).reshape(7, 2)),
edges=self._make_nest(jnp.arange(24).reshape(8, 3)),
globals=self._make_nest(jnp.arange(14).reshape(7, 2)),
senders=jnp.array([0, 0, 1, 1, 2, 3, 3, 6]),
receivers=jnp.array([0, 0, 2, 1, 3, 2, 1, 5]))
list_graphs = [
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([2]),
nodes=self._make_nest(jnp.array([[0, 1]])),
edges=self._make_nest(jnp.array([[0, 1, 2], [3, 4, 5]])),
globals=self._make_nest(jnp.array([[0, 1]])),
senders=jnp.array([0, 0]),
receivers=jnp.array([0, 0])),
graph.GraphsTuple(
n_node=jnp.array([3]),
n_edge=jnp.array([5]),
nodes=self._make_nest(jnp.array([[2, 3], [4, 5], [6, 7]])),
edges=self._make_nest(
jnp.array([[6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17],
[18, 19, 20]])),
globals=self._make_nest(jnp.array([[2, 3]])),
senders=jnp.array([0, 0, 1, 2, 2]),
receivers=jnp.array([1, 0, 2, 1, 0])),
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([0]),
nodes=self._make_nest(jnp.array([[8, 9]])),
edges=self._make_nest(jnp.zeros((0, 3))),
globals=self._make_nest(jnp.array([[4, 5]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=self._make_nest(jnp.zeros((0, 2))),
edges=self._make_nest(jnp.zeros((0, 3))),
globals=self._make_nest(jnp.array([[6, 7]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([2]),
n_edge=jnp.array([1]),
nodes=self._make_nest(jnp.array([[10, 11], [12, 13]])),
edges=self._make_nest(jnp.array([[21, 22, 23]])),
globals=self._make_nest(jnp.array([[8, 9]])),
senders=jnp.array([1]),
receivers=jnp.array([0])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=self._make_nest(jnp.zeros((0, 2))),
edges=self._make_nest(jnp.zeros((0, 3))),
globals=self._make_nest(jnp.array([[10, 11]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=self._make_nest(jnp.zeros((0, 2))),
edges=self._make_nest(jnp.zeros((0, 3))),
globals=self._make_nest(jnp.array([[12, 13]])),
senders=jnp.array([]),
receivers=jnp.array([]))
]
return list_graphs, batched_graph
@parameterized.parameters(_get_graph_network,
_get_graph_network_no_node_update,
_get_graph_network_no_edge_update,
_get_graph_network_no_global_update,
_get_attention_graph_network,
_get_multi_head_attention_graph_network,
_get_graph_gat)
def test_connect_graphnetwork(self, network_fn):
_, batched_graphs_tuple = self._get_list_and_batched_graph()
with self.subTest('nojit'):
out = network_fn(batched_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out,
batched_graphs_tuple)
with self.subTest('jit'):
out = jax.jit(network_fn)(batched_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out,
batched_graphs_tuple)
@parameterized.parameters(_get_graph_network,
_get_graph_network_no_node_update,
_get_graph_network_no_edge_update,
_get_graph_network_no_global_update)
def test_connect_graphnetwork_nones(self, network_fn):
batched_graphs_tuple = graph.GraphsTuple(
n_node=jnp.array([1, 3, 1, 0, 2, 0, 0]),
n_edge=jnp.array([2, 5, 0, 0, 1, 0, 0]),
nodes=self._make_nest(jnp.arange(14).reshape(7, 2)),
edges=self._make_nest(jnp.arange(24).reshape(8, 3)),
globals=self._make_nest(jnp.arange(14).reshape(7, 2)),
senders=jnp.array([0, 0, 1, 1, 2, 3, 3, 6]),
receivers=jnp.array([0, 0, 2, 1, 3, 2, 1, 5]))
for name, graphs_tuple in [
('no_globals', batched_graphs_tuple._replace(globals=None)),
('empty_globals', batched_graphs_tuple._replace(globals=[])),
('no_edges', batched_graphs_tuple._replace(edges=None)),
('empty_edges', batched_graphs_tuple._replace(edges=[])),
]:
with self.subTest(name + '_nojit'):
out = network_fn(graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out, graphs_tuple)
with self.subTest(name + '_jit'):
out = jax.jit(network_fn)(graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out, graphs_tuple)
@parameterized.parameters(_get_interaction_network,
_get_graph_independent,
_get_gat,
_get_relation_network,
_get_deep_sets)
def test_connect_gnns(self, network_fn):
batched_graphs_tuple = graph.GraphsTuple(
n_node=jnp.array([1, 3, 1, 0, 2, 0, 0]),
n_edge=jnp.array([1, 7, 1, 0, 3, 0, 0]),
nodes=jnp.arange(14).reshape(7, 2),
edges=jnp.arange(36).reshape(12, 3),
globals=jnp.arange(14).reshape(7, 2),
senders=jnp.array([0, 1, 2, 3, 4, 5, 6, 1, 2, 3, 3, 6]),
receivers=jnp.array([0, 1, 2, 3, 4, 5, 6, 2, 3, 2, 1, 5]))
with self.subTest('nojit'):
out, expected_out = network_fn(batched_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out, expected_out)
with self.subTest('jit'):
out, expected_out = jax.jit(network_fn)(batched_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, out, expected_out)
def test_graphnetwork_attention_error(self):
with self.assertRaisesRegex(
ValueError, ('attention_logit_fn and attention_reduce_fn '
'must both be supplied.')):
models.GraphNetwork(update_edge_fn=None, update_node_fn=None,
attention_logit_fn=lambda x: x,
attention_reduce_fn=None)
with self.assertRaisesRegex(
ValueError, ('attention_logit_fn and attention_reduce_fn '
'must both be supplied.')):
models.GraphNetwork(update_edge_fn=None, update_node_fn=None,
attention_logit_fn=None,
attention_reduce_fn=lambda x: x)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/_src/models_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jraph.utils."""
import functools
import os
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax.lib import xla_bridge
import jax.numpy as jnp
import jax.tree_util as tree
from jraph._src import graph
from jraph._src import utils
import numpy as np
def _get_random_graph(max_n_graph=10,
include_node_features=True,
include_edge_features=True,
include_globals=True):
n_graph = np.random.randint(1, max_n_graph + 1)
n_node = np.random.randint(0, 10, n_graph)
n_edge = np.random.randint(0, 20, n_graph)
# We cannot have any edges if there are no nodes.
n_edge[n_node == 0] = 0
senders = []
receivers = []
offset = 0
for n_node_in_graph, n_edge_in_graph in zip(n_node, n_edge):
if n_edge_in_graph != 0:
senders += list(
np.random.randint(0, n_node_in_graph, n_edge_in_graph) + offset)
receivers += list(
np.random.randint(0, n_node_in_graph, n_edge_in_graph) + offset)
offset += n_node_in_graph
if include_globals:
global_features = jnp.asarray(np.random.random(size=(n_graph, 5)))
else:
global_features = None
if include_node_features:
nodes = jnp.asarray(np.random.random(size=(np.sum(n_node), 4)))
else:
nodes = None
if include_edge_features:
edges = jnp.asarray(np.random.random(size=(np.sum(n_edge), 3)))
else:
edges = None
return graph.GraphsTuple(
n_node=jnp.asarray(n_node),
n_edge=jnp.asarray(n_edge),
nodes=nodes,
edges=edges,
globals=global_features,
senders=jnp.asarray(senders),
receivers=jnp.asarray(receivers))
def _make_nest(array):
"""Returns a nest given an array."""
return {'a': array,
'b': [jnp.ones_like(array), {'c': jnp.zeros_like(array)}]}
def _get_list_and_batched_graph():
"""Returns a list of individual graphs and a batched version.
This test-case includes the following corner-cases:
- single node,
- multiple nodes,
- no edges,
- single edge,
- and multiple edges.
"""
batched_graph = graph.GraphsTuple(
n_node=jnp.array([1, 3, 1, 0, 2, 0, 0]),
n_edge=jnp.array([2, 5, 0, 0, 1, 0, 0]),
nodes=_make_nest(jnp.arange(14).reshape(7, 2)),
edges=_make_nest(jnp.arange(24).reshape(8, 3)),
globals=_make_nest(jnp.arange(14).reshape(7, 2)),
senders=jnp.array([0, 0, 1, 1, 2, 3, 3, 6]),
receivers=jnp.array([0, 0, 2, 1, 3, 2, 1, 5]))
list_graphs = [
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([2]),
nodes=_make_nest(jnp.array([[0, 1]])),
edges=_make_nest(jnp.array([[0, 1, 2], [3, 4, 5]])),
globals=_make_nest(jnp.array([[0, 1]])),
senders=jnp.array([0, 0]),
receivers=jnp.array([0, 0])),
graph.GraphsTuple(
n_node=jnp.array([3]),
n_edge=jnp.array([5]),
nodes=_make_nest(jnp.array([[2, 3], [4, 5], [6, 7]])),
edges=_make_nest(
jnp.array([[6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17],
[18, 19, 20]])),
globals=_make_nest(jnp.array([[2, 3]])),
senders=jnp.array([0, 0, 1, 2, 2]),
receivers=jnp.array([1, 0, 2, 1, 0])),
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([0]),
nodes=_make_nest(jnp.array([[8, 9]])),
edges=_make_nest(jnp.zeros((0, 3))),
globals=_make_nest(jnp.array([[4, 5]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=_make_nest(jnp.zeros((0, 2))),
edges=_make_nest(jnp.zeros((0, 3))),
globals=_make_nest(jnp.array([[6, 7]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([2]),
n_edge=jnp.array([1]),
nodes=_make_nest(jnp.array([[10, 11], [12, 13]])),
edges=_make_nest(jnp.array([[21, 22, 23]])),
globals=_make_nest(jnp.array([[8, 9]])),
senders=jnp.array([1]),
receivers=jnp.array([0])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=_make_nest(jnp.zeros((0, 2))),
edges=_make_nest(jnp.zeros((0, 3))),
globals=_make_nest(jnp.array([[10, 11]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=_make_nest(jnp.zeros((0, 2))),
edges=_make_nest(jnp.zeros((0, 3))),
globals=_make_nest(jnp.array([[12, 13]])),
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([]),
n_edge=jnp.array([]),
nodes=_make_nest(jnp.zeros((0, 2))),
edges=_make_nest(jnp.zeros((0, 3))),
globals=_make_nest(jnp.zeros((0, 2))),
senders=jnp.array([]),
receivers=jnp.array([])),
]
return list_graphs, batched_graph
def _get_list_matrix():
"""Returns a list of adjacency matrices, and its sparse and graph versions.
This test-case includes the following corner-cases:
- single node,
- multiple nodes,
- no edges,
- single edge,
- and multiple edges.
"""
adj_matrices = [
np.array([[2]]),
np.array([[1, 1, 0], [0, 0, 1], [1, 1, 0]]),
np.array([[0]]),
np.array([[]]),
np.array([[0, 0], [1, 0]]),
]
# Sparse version of the above adjacency matrix.
sparse_coo_matrices = [
# (row, column, values, n_node)
(np.array([0]), np.array([0]), np.array([2]), np.array([1])),
(np.array([0, 0, 1, 2, 2]), np.array([0, 1, 2, 0, 1]),
np.array([1, 1, 1, 1, 1]), np.array(3)),
(np.array([]), np.array([]), np.array([]), np.array(1)),
(np.array([]), np.array([]), np.array([]), np.array(0)),
(np.array([1]), np.array([0]), np.array([1]), np.array(2)),
]
expected_graphs = [
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([2]),
nodes=None, edges=None, globals=None,
senders=jnp.array([0, 0]),
receivers=jnp.array([0, 0])),
graph.GraphsTuple(
n_node=jnp.array([3]),
n_edge=jnp.array([5]),
nodes=None, edges=None, globals=None,
senders=jnp.array([0, 0, 1, 2, 2]),
receivers=jnp.array([0, 1, 2, 0, 1])),
graph.GraphsTuple(
n_node=jnp.array([1]),
n_edge=jnp.array([0]),
nodes=None, edges=None, globals=None,
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([0]),
n_edge=jnp.array([0]),
nodes=None, edges=None, globals=None,
senders=jnp.array([]),
receivers=jnp.array([])),
graph.GraphsTuple(
n_node=jnp.array([2]),
n_edge=jnp.array([1]),
nodes=None, edges=None, globals=None,
senders=jnp.array([1]),
receivers=jnp.array([0])),
]
return adj_matrices, sparse_coo_matrices, expected_graphs
class GraphTest(parameterized.TestCase):
def test_batch(self):
"""Tests batching of graph."""
list_graphs_tuple, batched_graphs_tuple = _get_list_and_batched_graph()
graphs_tuple = utils.batch(list_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, graphs_tuple,
batched_graphs_tuple)
def test_unbatch(self):
"""Tests unbatching of graph."""
list_graphs_tuple, batched_graphs_tuple = _get_list_and_batched_graph()
graphs_tuples = utils.unbatch(batched_graphs_tuple)
# The final GraphsTuple does not contain a graph, and so shouldn't be
# present in the result.
jax.tree_util.tree_map(np.testing.assert_allclose, graphs_tuples,
list_graphs_tuple[:-1])
def test_batch_np(self):
"""Tests batching of graph in numpy."""
(list_graphs_tuple, batched_graphs_tuple) = _get_list_and_batched_graph()
graphs_tuple = utils.batch_np(list_graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_allclose, graphs_tuple,
batched_graphs_tuple)
def test_unbatch_np(self):
"""Tests unbatching of graph in numpy."""
(list_graphs_tuple, batched_graphs_tuple) = _get_list_and_batched_graph()
graphs_tuples = utils.unbatch_np(batched_graphs_tuple)
# The final GraphsTuple does not contain a graph, and so shouldn't be
# present in the result.
jax.tree_util.tree_map(np.testing.assert_allclose, graphs_tuples,
list_graphs_tuple[:-1])
@parameterized.parameters((True, True, False),
(True, False, True),
(False, True, True))
def test_batch_unbatch_with_random_graphs(self,
include_globals,
include_nodes,
include_edges):
"""Tests batch(unbatch) is identity with random graphs."""
np.random.seed(42)
for _ in range(100):
g = _get_random_graph(include_globals=include_globals,
include_node_features=include_nodes,
include_edge_features=include_edges)
jax.tree_util.tree_map(np.testing.assert_allclose,
utils.batch(utils.unbatch(g)), g)
for _ in range(10):
rg = lambda: _get_random_graph( # pylint: disable=g-long-lambda
1, include_nodes, include_edges, include_globals)
graphs1 = [rg() for _ in range(np.random.randint(1, 10))]
graphs2 = utils.unbatch(utils.batch(graphs1))
for g1, g2 in zip(graphs1, graphs2):
jax.tree_util.tree_map(np.testing.assert_allclose, g1, g2)
def test_pad_with_graphs(self):
"""Tests padding of graph."""
_, graphs_tuple = _get_list_and_batched_graph()
padded_graphs_tuple = utils.pad_with_graphs(graphs_tuple, 10, 12, 9)
expected_padded_graph = graph.GraphsTuple(
n_node=jnp.concatenate([graphs_tuple.n_node,
jnp.array([3, 0])]),
n_edge=jnp.concatenate([graphs_tuple.n_edge,
jnp.array([4, 0])]),
nodes=tree.tree_map(
lambda f: jnp.concatenate([f, jnp.zeros((3, 2), dtype=f.dtype)]),
graphs_tuple.nodes),
edges=tree.tree_map(
lambda f: jnp.concatenate([f, jnp.zeros((4, 3), dtype=f.dtype)]),
graphs_tuple.edges),
globals=tree.tree_map(
lambda f: jnp.concatenate([f, jnp.zeros((2, 2), dtype=f.dtype)]),
graphs_tuple.globals),
senders=jnp.concatenate([graphs_tuple.senders,
jnp.array([7, 7, 7, 7])]),
receivers=jnp.concatenate(
[graphs_tuple.receivers,
jnp.array([7, 7, 7, 7])]),
)
jax.tree_util.tree_map(np.testing.assert_allclose, padded_graphs_tuple,
expected_padded_graph)
def test_unpad(self):
"""Tests unpadding of graph."""
_, graphs_tuple = _get_list_and_batched_graph()
unpadded_graphs_tuple = utils.unpad_with_graphs(graphs_tuple)
expected_unpadded_graph = graph.GraphsTuple(
n_node=jnp.array([1, 3, 1, 0]),
n_edge=jnp.array([2, 5, 0, 0]),
nodes=_make_nest(jnp.arange(10).reshape(5, 2)),
edges=_make_nest(jnp.arange(21).reshape(7, 3)),
globals=_make_nest(jnp.arange(8).reshape(4, 2)),
senders=jnp.array([0, 0, 1, 1, 2, 3, 3]),
receivers=jnp.array([0, 0, 2, 1, 3, 2, 1]))
jax.tree_util.tree_map(np.testing.assert_allclose, unpadded_graphs_tuple,
expected_unpadded_graph)
@parameterized.parameters((True, True, False),
(True, False, True),
(False, True, True))
def test_pad_unpad_with_random_graphs(self,
include_globals,
include_nodes,
include_edges):
"""Tests unpad(pad) is identity with random graphs."""
np.random.seed(42)
for _ in range(100):
g = _get_random_graph(include_globals=include_globals,
include_node_features=include_nodes,
include_edge_features=include_edges)
jax.tree_util.tree_map(
np.testing.assert_allclose,
utils.unpad_with_graphs(utils.pad_with_graphs(g, 101, 200, 11)), g)
def test_pad_unpad_with_graphs_exact_padding(self):
"""Tests unpad(pad) is identity with random graphs."""
g = _get_random_graph(include_globals=True,
include_node_features=True,
include_edge_features=True)
recovered_g = utils.unpad_with_graphs(utils.pad_with_graphs(
g,
n_node=g.n_node.sum() + 1,
n_edge=g.n_edge.sum(),
n_graph=g.n_node.shape[0] + 1))
jax.tree_util.tree_map(np.testing.assert_allclose, recovered_g, g)
def test_get_number_of_padding_with_graphs_graphs(self):
"""Tests the number of padding graphs calculation."""
_, graphs_tuple = _get_list_and_batched_graph()
expected = 3
with self.subTest('nojit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
utils.get_number_of_padding_with_graphs_graphs(graphs_tuple),
expected)
with self.subTest('jit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
jax.jit(utils.get_number_of_padding_with_graphs_graphs)(graphs_tuple),
expected)
def test_get_number_of_padding_with_graphs_nodes(self):
"""Tests the number of padding nodes calculation."""
_, graphs_tuple = _get_list_and_batched_graph()
expected = 2
with self.subTest('nojit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
utils.get_number_of_padding_with_graphs_nodes(graphs_tuple), expected)
with self.subTest('jit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
jax.jit(utils.get_number_of_padding_with_graphs_nodes)(graphs_tuple),
expected)
def test_get_number_of_padding_with_graphs_edges(self):
"""Tests the number of padding edges calculation."""
_, graphs_tuple = _get_list_and_batched_graph()
expected = 1
with self.subTest('nojit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
utils.get_number_of_padding_with_graphs_edges(graphs_tuple), expected)
with self.subTest('jit'):
jax.tree_util.tree_map(
np.testing.assert_allclose,
jax.jit(utils.get_number_of_padding_with_graphs_edges)(graphs_tuple),
expected)
def test_get_node_padding_mask(self):
"""Tests construction of node padding mask."""
_, graphs_tuple = _get_list_and_batched_graph()
expected_mask = jnp.array([1, 1, 1, 1, 1, 0, 0]).astype(bool)
with self.subTest('nojit'):
mask = utils.get_node_padding_mask(graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_array_equal, mask, expected_mask)
with self.subTest('jit'):
mask = jax.jit(utils.get_node_padding_mask)(graphs_tuple)
jax.tree_util.tree_map(np.testing.assert_array_equal, mask, expected_mask)
def test_get_edge_padding_mask(self):
"""Tests construction of edge padding mask."""
_, graphs_tuple = _get_list_and_batched_graph()
expected_mask = jnp.array([1, 1, 1, 1, 1, 1, 1, 0]).astype(bool)
with self.subTest('nojit'):
mask = utils.get_edge_padding_mask(graphs_tuple)
np.testing.assert_array_equal(mask, expected_mask)
with self.subTest('jit'):
mask = jax.jit(utils.get_edge_padding_mask)(graphs_tuple)
np.testing.assert_array_equal(mask, expected_mask)
def test_get_graph_padding_mask(self):
"""Tests construction of graph padding mask."""
_, graphs_tuple = _get_list_and_batched_graph()
expected_mask = jnp.array([1, 1, 1, 1, 0, 0, 0]).astype(bool)
with self.subTest('nojit'):
mask = utils.get_graph_padding_mask(graphs_tuple)
np.testing.assert_array_equal(mask, expected_mask)
with self.subTest('jit'):
mask = jax.jit(utils.get_graph_padding_mask)(graphs_tuple)
np.testing.assert_array_equal(mask, expected_mask)
def test_segment_sum(self):
result = utils.segment_sum(
jnp.arange(9), jnp.array([0, 1, 2, 0, 4, 0, 1, 1, 0]), 6)
np.testing.assert_allclose(result, jnp.array([16, 14, 2, 0, 4, 0]))
def test_segment_sum_optional_num_segments(self):
result = utils.segment_sum(
jnp.arange(9), jnp.array([0, 1, 2, 0, 4, 0, 1, 1, 0]))
np.testing.assert_allclose(result, jnp.array([16, 14, 2, 0, 4]))
@parameterized.parameters((True,), (False,))
def test_segment_mean(self, nan_data):
data = jnp.arange(9, dtype=jnp.float32)
expected_out = jnp.array([4, 14 / 3.0, 2, 0, 4, 0])
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, 0])
if nan_data:
data = data.at[0].set(jnp.nan)
expected_out = expected_out.at[segment_ids[0]].set(jnp.nan)
result = utils.segment_mean(data, segment_ids, 6)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((True,), (False,))
def test_segment_variance(self, nan_data):
data = jnp.arange(8, dtype=jnp.float32)
expected_out = jnp.stack([jnp.var(jnp.arange(3)),
jnp.var(jnp.arange(3, 5)),
jnp.var(jnp.arange(5, 8))])
segment_ids = jnp.array([0, 0, 0, 1, 1, 2, 2, 2])
if nan_data:
data = data.at[0].set(jnp.nan)
expected_out = expected_out.at[segment_ids[0]].set(jnp.nan)
result = utils.segment_variance(data, segment_ids, 3)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((True,), (False,))
def test_segment_normalize(self, nan_data):
norm = lambda x: (x - jnp.mean(x)) * jax.lax.rsqrt(jnp.var(x))
data = jnp.arange(8, dtype=jnp.float32)
segment_ids = jnp.array([0, 0, 0, 1, 1, 2, 2, 2])
expected_out = jnp.concatenate(
[norm(jnp.arange(3, dtype=jnp.float32)),
norm(jnp.arange(3, 5, dtype=jnp.float32)),
norm(jnp.arange(5, 8, dtype=jnp.float32))])
if nan_data:
data = data.at[0].set(jnp.nan)
expected_out = expected_out.at[:3].set(jnp.nan)
result = utils.segment_normalize(data, segment_ids, 3)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((False, False),
(True, False),
(True, True),
(False, True),
(False, True))
def test_segment_max(self, indices_are_sorted, unique_indices):
neg_inf = jnp.iinfo(jnp.int32).min
if unique_indices:
data = jnp.arange(6)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array([0, 1, 2, 3, 4, 5])
num_segments = 6
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array([1, 0, 2, 4, 3])
num_segments = 5
else:
data = jnp.arange(9)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array([2, 5, 6, 7, 8, neg_inf])
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array([5, 7, 2, neg_inf, 4, neg_inf])
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_max(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
result = utils.segment_max(data, segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segments = jnp.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segments])
with self.subTest('jit'):
result = jax.jit(utils.segment_max, static_argnums=(2, 3, 4))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((False, False), (True, False),
(True, True), (False, True),
(False, True))
def test_segment_max_or_constant(self, indices_are_sorted, unique_indices):
if unique_indices:
data = jnp.arange(6, dtype=jnp.float32)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array([0, 1, 2, 3, 4, 5, 0], dtype=jnp.float32)
num_segments = 7
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array([1, 0, 2, 4, 3], dtype=jnp.float32)
num_segments = 5
else:
data = jnp.arange(9, dtype=jnp.float32)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array([2, 5, 6, 7, 8, 0], dtype=jnp.float32)
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array([5, 7, 2, 0, 4, 0], dtype=jnp.float32)
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_max_or_constant(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
grad = jax.grad(lambda *x: jnp.sum(utils.segment_max_or_constant(*x)))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
assert np.all(jnp.isfinite(grad))
result = utils.segment_max_or_constant(
data,
segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segments = jnp.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segments])
with self.subTest('jit'):
result = jax.jit(
utils.segment_max_or_constant,
static_argnums=(2, 3, 4))(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
grad_fn = jax.jit(
jax.grad(lambda *x: jnp.sum(utils.segment_max_or_constant(*x))),
static_argnums=(2, 3, 4))
grad = grad_fn(data, segment_ids, num_segments, indices_are_sorted,
unique_indices)
assert np.all(jnp.isfinite(grad))
@parameterized.parameters((False, False), (True, False), (True, True),
(False, True))
def test_segment_max_or_constant_2d(self, indices_are_sorted, unique_indices):
if unique_indices:
data = jnp.stack([jnp.arange(6), jnp.arange(6, 0, -1)], axis=1)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array(
[[0, 6], [1, 5], [2, 4], [3, 3], [4, 2], [5, 1]])
num_segments = 6
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array(
[[1, 5], [0, 6], [2, 4], [4, 2], [3, 3]])
num_segments = 5
else:
data = jnp.stack([jnp.arange(9), jnp.arange(9, 0, -1)], axis=1)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array(
[[2, 9], [5, 6], [6, 3], [7, 2], [8, 1], [0, 0]])
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array(
[[5, 9], [7, 8], [2, 7], [0, 0], [4, 5], [0, 0]])
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_max_or_constant(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
result = utils.segment_max_or_constant(
data,
segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segments = jnp.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segments])
with self.subTest('jit'):
result = jax.jit(utils.segment_max_or_constant, static_argnums=(2, 3, 4))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((False, False),
(True, False),
(True, True),
(False, True))
def test_segment_min(self, indices_are_sorted, unique_indices):
inf = jnp.iinfo(jnp.int32).max
if unique_indices:
data = jnp.arange(6)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array([0, 1, 2, 3, 4, 5])
num_segments = 6
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array([1, 0, 2, 4, 3])
num_segments = 5
else:
data = jnp.arange(9)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array([0, 3, 6, 7, 8, inf])
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array([0, 1, 2, inf, 4, inf])
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_min(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
result = utils.segment_min(data, segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segment = np.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segment])
with self.subTest('jit'):
result = jax.jit(utils.segment_min, static_argnums=(2, 3, 4))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((False, False), (True, False), (True, True),
(False, True))
def test_segment_min_or_constant(self, indices_are_sorted, unique_indices):
if unique_indices:
data = jnp.arange(6, dtype=jnp.float32)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array([0, 1, 2, 3, 4, 5], dtype=jnp.float32)
num_segments = 6
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array([1, 0, 2, 4, 3], dtype=jnp.float32)
num_segments = 5
else:
data = jnp.arange(9, dtype=jnp.float32)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array([0, 3, 6, 7, 8, 0], dtype=jnp.float32)
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array([0, 1, 2, 0, 4, 0], dtype=jnp.float32)
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_min_or_constant(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
grad = jax.grad(lambda *x: jnp.sum(utils.segment_min_or_constant(*x)))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
assert np.all(jnp.isfinite(grad))
result = utils.segment_min_or_constant(
data,
segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segments = jnp.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segments])
with self.subTest('jit'):
result = jax.jit(
utils.segment_min_or_constant,
static_argnums=(2, 3, 4))(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
grad_fn = jax.jit(
jax.grad(lambda *x: jnp.sum(utils.segment_min_or_constant(*x))),
static_argnums=(2, 3, 4))
grad = grad_fn(data, segment_ids, num_segments, indices_are_sorted,
unique_indices)
assert np.all(jnp.isfinite(grad))
@parameterized.parameters((False, False), (True, False), (True, True),
(False, True))
def test_segment_min_or_constant_2d(self, indices_are_sorted, unique_indices):
if unique_indices:
data = jnp.stack([jnp.arange(6), jnp.arange(6, 0, -1)], axis=1)
if indices_are_sorted:
segment_ids = jnp.array([0, 1, 2, 3, 4, 5])
expected_out = jnp.array(
[[0, 6], [1, 5], [2, 4], [3, 3], [4, 2], [5, 1]])
num_segments = 6
else:
segment_ids = jnp.array([1, 0, 2, 4, 3, -5])
expected_out = jnp.array(
[[1, 5], [0, 6], [2, 4], [4, 2], [3, 3]])
num_segments = 5
else:
data = jnp.stack([jnp.arange(9), jnp.arange(9, 0, -1)], axis=1)
if indices_are_sorted:
segment_ids = jnp.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
expected_out = jnp.array(
[[0, 7], [3, 4], [6, 3], [7, 2], [8, 1], [0, 0]])
else:
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, -6])
expected_out = jnp.array(
[[0, 4], [1, 2], [2, 7], [0, 0], [4, 5], [0, 0]])
num_segments = 6
with self.subTest('nojit'):
result = utils.segment_min_or_constant(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
result = utils.segment_min_or_constant(
data,
segment_ids,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
num_unique_segments = jnp.max(segment_ids) + 1
np.testing.assert_allclose(result, expected_out[:num_unique_segments])
with self.subTest('jit'):
result = jax.jit(utils.segment_min_or_constant, static_argnums=(2, 3, 4))(
data, segment_ids, num_segments, indices_are_sorted, unique_indices)
np.testing.assert_allclose(result, expected_out)
@parameterized.parameters((True,), (False,))
def test_segment_softmax(self, nan_data):
data = jnp.arange(9, dtype=jnp.float32)
segment_ids = jnp.array([0, 1, 2, 0, 4, 0, 1, 1, 0])
num_segments = 6
expected_out = jnp.array([3.1741429e-04, 1.8088353e-03, 1.0000000e+00,
6.3754367e-03, 1.0000000e+00, 4.7108460e-02,
2.6845494e-01, 7.2973621e-01, 9.4619870e-01])
if nan_data:
data = data.at[0].set(jnp.nan)
expected_out = expected_out.at[np.array([0, 3, 5, 8])].set(jnp.nan)
with self.subTest('nojit'):
result = utils.segment_softmax(data, segment_ids, num_segments)
np.testing.assert_allclose(result, expected_out)
result = utils.segment_softmax(data, segment_ids)
np.testing.assert_allclose(result, expected_out)
with self.subTest('jit'):
result = jax.jit(
utils.segment_softmax, static_argnums=2)(data, segment_ids,
num_segments)
np.testing.assert_allclose(result, expected_out)
def test_partition_softmax(self):
data = jnp.arange(9)
partitions = jnp.array([3, 2, 4])
sum_partitions = 9
expected_out = np.array([0.090031, 0.244728, 0.665241, 0.268941, 0.731059,
0.032059, 0.087144, 0.236883, 0.643914])
with self.subTest('nojit'):
result = utils.partition_softmax(data, partitions, sum_partitions)
jax.tree_util.tree_map(
functools.partial(np.testing.assert_allclose, atol=1E-5, rtol=1E-5),
result, expected_out)
result = utils.partition_softmax(data, partitions)
jax.tree_util.tree_map(
functools.partial(np.testing.assert_allclose, atol=1E-5, rtol=1E-5),
result, expected_out)
with self.subTest('jit'):
result = jax.jit(utils.partition_softmax, static_argnums=2)(
data, partitions, sum_partitions)
jax.tree_util.tree_map(
functools.partial(np.testing.assert_allclose, atol=1E-5, rtol=1E-5),
result, expected_out)
@parameterized.named_parameters(('valid_1_no_feat', 1, 1, False, False),
('valid_5_no_feat', 5, 5, False, False),
('valid_1_nodes', 1, 1, True, False),
('valid_5_globals', 5, 5, False, True),
('valid_5_both', 5, 5, True, True),
('zero_nodes', 0, 1, False, False),
('zero_graphs', 1, 0, False, False),)
def test_fully_connected_graph(self, n_node, n_graph, nodes, globals_):
node_feat = np.random.rand(n_node*n_graph, 32) if nodes else None
global_feat = np.random.rand(n_graph, 32) if globals_ else None
with self.subTest('nojit'):
result = utils.get_fully_connected_graph(
n_node, n_graph, node_feat, global_feat)
if nodes:
self.assertLen(result.nodes, n_node*n_graph)
if globals_:
self.assertLen(result.globals, n_graph)
self.assertLen(result.senders, n_node**2 * n_graph)
self.assertLen(result.receivers, n_node**2 * n_graph)
np.testing.assert_allclose(result.n_node, jnp.array([n_node] * n_graph))
with self.subTest('jit'):
result = jax.jit(utils.get_fully_connected_graph, static_argnums=[0, 1])(
n_node, n_graph, node_feat, global_feat)
if nodes:
self.assertLen(result.nodes, n_node*n_graph)
if globals_:
self.assertLen(result.globals, n_graph)
self.assertLen(result.senders, n_node**2 * n_graph)
self.assertLen(result.receivers, n_node**2 * n_graph)
np.testing.assert_allclose(result.n_node, jnp.array([n_node] * n_graph))
with self.subTest('senders_receiver_indices'):
if n_node > 0:
# [0, 1, ..., n_node - 1]
node_indices = jnp.arange(n_node)
# [0, 1,..., n_node - 1] + [0, 1,..., n_node - 1] + ... n_node times
# [0,..., 0, 1,..., 1,..., n_node - 1,..., n_node - 1] each n_node times
expected_senders = np.concatenate([node_indices] * n_node, axis=0)
expected_receivers = np.stack(
[node_indices] * n_node, axis=-1).reshape([-1])
else:
expected_senders = np.array([], dtype=np.int32)
expected_receivers = np.array([], dtype=np.int32)
# Check sender and receivers on each graph in the batch.
for result_graph in utils.unbatch(result):
np.testing.assert_allclose(result_graph.senders, expected_senders)
np.testing.assert_allclose(result_graph.receivers, expected_receivers)
@parameterized.named_parameters(('valid_1_no_feat', 1, 1),
('valid_5_no_feat', 5, 5),
('zero_nodes', 0, 1),
('zero_graphs', 1, 0),)
def test_fully_connected_graph_no_self_edges(self, n_node, n_graph):
# `test_fully_connected_graph` already tests the case `add_self_edges=True`
# so all that is left to test is that if we set `add_self_edges=False` we
# get the same edges, except for the self-edges (although order may differ).
graph_with_self_edges = utils.get_fully_connected_graph(
n_node, n_graph, add_self_edges=True)
graph_without_self_edges = utils.get_fully_connected_graph(
n_node, n_graph, add_self_edges=False)
    # We compare sets of (sender, receiver) pairs, since the edge order is not
    # preserved due to the usage of `np.roll` (e.g. if you remove the self
    # edges after add_self_edges=True, the remaining edges are in a different
    # order than if add_self_edges=False).
send_recv_actual = {
(s, r) for s, r in zip(
np.asarray(graph_without_self_edges.senders),
np.asarray(graph_without_self_edges.receivers))}
# Remove the self edges by hand from `graph_with_self_edges`
mask_self_edges = (
graph_with_self_edges.senders == graph_with_self_edges.receivers)
send_recv_expected = {
(s, r) for s, r in zip(
np.asarray(graph_with_self_edges.senders[~mask_self_edges]),
np.asarray(graph_with_self_edges.receivers[~mask_self_edges]))}
self.assertSetEqual(send_recv_actual, send_recv_expected)
@parameterized.named_parameters(('with_self_edges', True),
('without_self_edges', False),)
def test_fully_connected_graph_order_edges(self, add_self_edges):
# This helps documenting the order of the output edges, so we are aware
# in case we accidentally change it.
graph_batch = utils.get_fully_connected_graph(
n_node_per_graph=3,
n_graph=1,
add_self_edges=add_self_edges)
if add_self_edges:
np.testing.assert_array_equal(
graph_batch.senders, [0, 1, 2] * 3)
np.testing.assert_array_equal(
graph_batch.receivers, [0] * 3 + [1] * 3 + [2] * 3)
else:
np.testing.assert_array_equal(graph_batch.senders, [1, 2, 2, 0, 0, 1])
np.testing.assert_array_equal(graph_batch.receivers, [0, 0, 1, 1, 2, 2])
class ConcatenatedArgsWrapperTest(parameterized.TestCase):
@parameterized.parameters(
([], {'a': np.array([10, 2])}, -1),
([np.array([10, 5])], {'a': np.array([10, 2])}, -1),
([np.array([10, 5]), np.array([10, 3])], {'a': np.array([10, 2])}, -1),
([np.array([10, 5]), np.array([10, 3])], {}, -1),
([{'a': np.array([10, 2]), 'b': np.array([10, 4])}],
{'c': np.array([10, 3])}, 1),
([{'a': np.array([2, 10]), 'b': np.array([4, 10])}],
{'c': np.array([3, 10])}, 0))
def test_single_arg(self, args_shapes, kwargs_shapes, axis):
args = tree.tree_map(lambda x: np.random.randn(*x), args_shapes)
kwargs = {k: np.random.randn(*shape) for k, shape in kwargs_shapes.items()}
@utils.concatenated_args(axis=axis)
def update_fn(feat):
return feat
out = update_fn(*args, **kwargs)
expected_out = jnp.concatenate(
list(tree.tree_flatten(args)[0]) + list(tree.tree_flatten(kwargs)[0]),
axis=axis)
np.testing.assert_allclose(out, expected_out)
_DB_NUM_NODES = (10, 15)
_DB_NODE_SHAPE = (3, 4, 1)
_DB_NUM_EDGES = (12, 17)
_DB_EDGE_SHAPE = (4, 3)
_DB_GLOBAL_SHAPE = (2, 3, 1, 4)
def _make_dynamic_batch_graph(
add_globals,
num_nodes=_DB_NUM_NODES,
num_edges=_DB_NUM_EDGES,
):
total_num_edges = sum(num_edges)
total_num_nodes = sum(num_nodes)
g_ = _make_nest(
np.random.normal(size=_DB_GLOBAL_SHAPE)) if add_globals else {}
return graph.GraphsTuple(
nodes=_make_nest(
np.random.normal(size=(total_num_nodes,) + _DB_NODE_SHAPE)),
edges=_make_nest(
np.random.normal(size=(total_num_edges,) + _DB_EDGE_SHAPE)),
n_edge=np.array(num_edges),
n_node=np.array(num_nodes),
senders=np.random.randint(
0, total_num_nodes, size=total_num_edges, dtype=np.int32),
receivers=np.random.randint(
0, total_num_nodes, size=total_num_edges, dtype=np.int32),
globals=g_)
class DynamicBatchTest(parameterized.TestCase):
def setUp(self):
super().setUp()
os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=4'
xla_bridge.get_backend.cache_clear()
self._global_graph = _make_dynamic_batch_graph(add_globals=True)
self._global_small_graph = _make_dynamic_batch_graph(
add_globals=True, num_nodes=(5, 7), num_edges=(6, 8))
@parameterized.named_parameters(
('graph_with_globals_n_node_hit', True, {
'n_node': sum(_DB_NUM_NODES) + 1,
'n_edge': sum(_DB_NUM_EDGES) + 100,
'n_graph': len(_DB_NUM_EDGES) + 100
}),
('graph_with_globals_n_edge_hit', True, {
'n_node': sum(_DB_NUM_NODES) + 100,
'n_edge': sum(_DB_NUM_EDGES),
'n_graph': len(_DB_NUM_EDGES) + 100
}),
('graph_with_globals_n_graph_hit', True, {
'n_node': sum(_DB_NUM_NODES) + 100,
'n_edge': sum(_DB_NUM_EDGES) + 100,
'n_graph': len(_DB_NUM_EDGES) + 1
}),
(
'graph_with_globals_no_budget_hit',
False,
{
# Add enough padding so not enough for a single extra graph.
'n_node': sum(_DB_NUM_NODES) + 5,
'n_edge': sum(_DB_NUM_EDGES) + 5,
'n_graph': len(_DB_NUM_EDGES) + 5
}),
(
'graph_no_globals_no_budget_hit',
False,
{
# Add enough padding so not enough for a single extra graph.
'n_node': sum(_DB_NUM_NODES) + 5,
'n_edge': sum(_DB_NUM_EDGES) + 5,
'n_graph': len(_DB_NUM_EDGES) + 5
}),
(
'graph_nests_no_globals_no_budget_hit',
False,
{
# Add enough padding so not enough for a single extra graph.
'n_node': sum(_DB_NUM_NODES) + 5,
'n_edge': sum(_DB_NUM_EDGES) + 5,
'n_graph': len(_DB_NUM_EDGES) + 5
}))
def test_dynamically_batch(self, use_globals, batch_kwargs):
def graph_iterator():
graphs = [
_make_dynamic_batch_graph(add_globals=use_globals) for x in range(4)]
return iter(graphs + utils.unbatch_np(graphs[-1]))
batched_dataset = utils.dynamically_batch(graph_iterator(),
**batch_kwargs)
batched_graphs = []
while True:
try:
batched_graphs.append(next(batched_dataset))
except StopIteration:
break
self.assertLen(batched_graphs, 5)
for batch_graphs in batched_graphs:
batch_nodes = jax.tree_util.tree_flatten(batch_graphs.nodes)[0]
for nodes in batch_nodes:
self.assertEqual(nodes.shape[0], batch_kwargs['n_node'])
batch_edges = jax.tree_util.tree_flatten(batch_graphs.edges)[0]
for edges in batch_edges:
self.assertEqual(edges.shape[0], batch_kwargs['n_edge'])
self.assertLen(batch_graphs.n_node, batch_kwargs['n_graph'])
self.assertEqual(
utils.get_number_of_padding_with_graphs_nodes(batch_graphs),
batch_kwargs['n_node'] - sum(_DB_NUM_NODES))
self.assertEqual(
utils.get_number_of_padding_with_graphs_edges(batch_graphs),
batch_kwargs['n_edge'] - sum(_DB_NUM_EDGES))
def test_too_big_graphs_tuple(self):
with self.subTest('test_too_big_nodes'):
iterator = utils.dynamically_batch(
iter([self._global_graph]), n_node=15, n_edge=50, n_graph=10)
self.assertRaisesRegex(
RuntimeError, 'Found graph bigger than batch size.*',
lambda: next(iterator))
with self.subTest('test_too_big_edges'):
iterator = utils.dynamically_batch(
iter([self._global_graph]), n_node=26, n_edge=15, n_graph=10)
self.assertRaisesRegex(
RuntimeError, 'Found graph bigger than batch size.*',
lambda: next(iterator))
with self.subTest('test_too_big_graphs'):
iterator = utils.dynamically_batch(
iter([self._global_graph]), n_node=50, n_edge=50, n_graph=1)
self.assertRaisesRegex(
ValueError, 'The number of graphs*',
lambda: next(iterator))
with self.subTest('test_too_big_fails_gracefully'):
# Ensure that dynamically_batch() returns the accumulated batch before
# raising an exception.
iterator = utils.dynamically_batch(
iter([self._global_small_graph, self._global_graph]),
n_node=15, n_edge=15, n_graph=10)
next(iterator)
self.assertRaisesRegex(
RuntimeError, 'Found graph bigger than batch size.*',
lambda: next(iterator))
def test_not_enough_graphs(self):
iterator = utils.dynamically_batch(
iter([self._global_graph]), n_node=5, n_edge=5, n_graph=1)
self.assertRaisesRegex(
ValueError, 'The number of graphs*', lambda: next(iterator))
class ZeroOutTest(parameterized.TestCase):
def _assert_values_for_graph(self, padded_graph, wrapper):
# Make padded graph values non zero.
padded_graph = padded_graph._replace(
nodes=tree.tree_map(lambda x: x - 1., padded_graph.nodes),
edges=tree.tree_map(lambda x: x - 1., padded_graph.edges),
globals=tree.tree_map(lambda x: x - 1., padded_graph.globals))
true_valid_graph = utils.unbatch(padded_graph)[0]
if wrapper:
zeroed_graph_net = utils.with_zero_out_padding_outputs(lambda x: x)
zeroed_padded_graph = zeroed_graph_net(padded_graph)
else:
zeroed_padded_graph = utils.zero_out_padding(padded_graph)
graphs = utils.unbatch(zeroed_padded_graph)
valid_graph = graphs[0]
padding_graphs = graphs[1:]
tree.tree_map(np.testing.assert_array_equal, valid_graph.nodes,
true_valid_graph.nodes)
for padding_graph in padding_graphs:
tree.tree_map(
lambda x: np.testing.assert_array_equal(x, jnp.zeros_like(x)),
padding_graph.nodes)
@parameterized.parameters(True, False)
def test_zero_padding_values(self, wrapper):
g = _get_random_graph(max_n_graph=1)
with self.subTest('test_all_padded_features'):
self._assert_values_for_graph(
utils.pad_with_graphs(g, n_node=20, n_edge=20, n_graph=3),
wrapper=wrapper)
with self.subTest('test_no_edge_features'):
self._assert_values_for_graph(
utils.pad_with_graphs(
g, n_node=sum(g.n_node) + 1, n_edge=sum(g.n_edge), n_graph=3),
wrapper=wrapper)
with self.subTest('test_no_extra_graph_features'):
self._assert_values_for_graph(
utils.pad_with_graphs(
g, n_node=sum(g.n_node) + 1, n_edge=sum(g.n_edge), n_graph=2),
wrapper=wrapper)
class AdjacencyMatrixTest(parameterized.TestCase):
def test_sparse_matrix_to_graphs_tuple(self):
"""Tests sparse COO matrix is correctly converted to a GraphsTuple."""
_, sparse_adj_matrices, expected_graphs = _get_list_matrix()
for (sparse_matrix,
expected_graph) in zip(sparse_adj_matrices, expected_graphs):
senders, receivers, values, n_node = sparse_matrix
from_sparse_graph = utils.sparse_matrix_to_graphs_tuple(
senders, receivers, values, n_node)
jax.tree_util.tree_map(np.testing.assert_allclose,
from_sparse_graph, expected_graph)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/_src/utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of Graph Neural Network models."""
import functools
from typing import Any, Callable, Iterable, Mapping, Optional, Union
import jax
import jax.numpy as jnp
import jax.tree_util as tree
from jraph._src import graph as gn_graph
from jraph._src import utils
# As of 04/2020 pytype doesn't support recursive types.
# pytype: disable=not-supported-yet
ArrayTree = Union[jnp.ndarray, Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
# All features will be an ArrayTree.
NodeFeatures = EdgeFeatures = SenderFeatures = ReceiverFeatures = Globals = ArrayTree
# Signature:
# (edges of each node to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToNodesFn = Callable[
[EdgeFeatures, jnp.ndarray, int], NodeFeatures]
# Signature:
# (nodes of each graph to be aggregated, segment ids, number of segments) ->
# aggregated nodes
AggregateNodesToGlobalsFn = Callable[[NodeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edges of each graph to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToGlobalsFn = Callable[[EdgeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# attention weights
AttentionLogitFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], ArrayTree]
# Signature:
# (edge features, weights) -> edge features for node update
AttentionReduceFn = Callable[[EdgeFeatures, ArrayTree], EdgeFeatures]
# Signature:
# (edges to be normalized, segment ids, number of segments) ->
# normalized edges
AttentionNormalizeFn = Callable[[EdgeFeatures, jnp.ndarray, int], EdgeFeatures]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# updated edge features
GNUpdateEdgeFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], EdgeFeatures]
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
GNUpdateNodeFn = Callable[
[NodeFeatures, SenderFeatures, ReceiverFeatures, Globals], NodeFeatures]
GNUpdateGlobalFn = Callable[[NodeFeatures, EdgeFeatures, Globals], Globals]
def GraphNetwork(
update_edge_fn: Optional[GNUpdateEdgeFn],
update_node_fn: Optional[GNUpdateNodeFn],
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_edges_for_nodes_fn: AggregateEdgesToNodesFn = utils.segment_sum,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils
.segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils
.segment_sum,
attention_logit_fn: Optional[AttentionLogitFn] = None,
attention_normalize_fn: Optional[AttentionNormalizeFn] = utils
.segment_softmax,
attention_reduce_fn: Optional[AttentionReduceFn] = None):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Example usage::
gn = GraphNetwork(update_edge_function,
update_node_function, **kwargs)
# Conduct multiple rounds of message passing with the same parameters:
for _ in range(num_message_passing_steps):
graph = gn(graph)
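  A minimal sketch of the attention hooks (the dot-product logit and
  elementwise reduce functions below are illustrative assumptions, reusing the
  placeholder update functions from the example above)::
    gn = GraphNetwork(
        update_edge_function, update_node_function,
        attention_logit_fn=lambda e, s, r, g: jnp.sum(
            s * r, axis=-1, keepdims=True),
        attention_reduce_fn=lambda e, w: e * w)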
Args:
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
node.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
attention_logit_fn: function used to calculate the attention weights or
None to deactivate attention mechanism.
attention_normalize_fn: function used to normalize raw attention logits or
None if attention mechanism is not active.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
Returns:
A method that applies the configured GraphNetwork.
"""
not_both_supplied = lambda x, y: (x != y) and ((x is None) or (y is None))
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
def _ApplyGraphNet(graph):
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
    than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
# Equivalent to jnp.sum(n_node), but jittable
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
sum_n_edge = senders.shape[0]
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
global_edge_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_edge, axis=0, total_repeat_length=sum_n_edge), globals_)
if update_edge_fn:
edges = update_edge_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
if attention_logit_fn:
logits = attention_logit_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
tree_calculate_weights = functools.partial(
attention_normalize_fn,
segment_ids=receivers,
num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
edges = attention_reduce_fn(edges, weights)
if update_node_fn:
sent_attributes = tree.tree_map(
lambda e: aggregate_edges_for_nodes_fn(e, senders, sum_n_node), edges)
received_attributes = tree.tree_map(
lambda e: aggregate_edges_for_nodes_fn(e, receivers, sum_n_node),
edges)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
global_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
nodes = update_node_fn(nodes, sent_attributes,
received_attributes, global_attributes)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
graph_idx, n_edge, axis=0, total_repeat_length=sum_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(n, node_gr_idx, n_graph),
nodes)
      edge_attributes = tree.tree_map(
          lambda e: aggregate_edges_for_globals_fn(e, edge_gr_idx, n_graph),
          edges)
      # These pooled nodes and edges are the inputs to the global update fn.
      globals_ = update_global_fn(node_attributes, edge_attributes, globals_)
# pylint: enable=g-long-lambda
return gn_graph.GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge)
return _ApplyGraphNet
InteractionUpdateNodeFn = Callable[
[NodeFeatures,
Mapping[str, SenderFeatures],
Mapping[str, ReceiverFeatures]],
NodeFeatures]
InteractionUpdateNodeFnNoSentEdges = Callable[
[NodeFeatures,
Mapping[str, ReceiverFeatures]],
NodeFeatures]
InteractionUpdateEdgeFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures], EdgeFeatures]
def InteractionNetwork(
update_edge_fn: InteractionUpdateEdgeFn,
update_node_fn: Union[InteractionUpdateNodeFn,
InteractionUpdateNodeFnNoSentEdges],
aggregate_edges_for_nodes_fn: AggregateEdgesToNodesFn = utils.segment_sum,
include_sent_messages_in_node_update: bool = False):
"""Returns a method that applies a configured InteractionNetwork.
An interaction network computes interactions on the edges based on the
  previous edge features, and on the features of the nodes sending into those
edges. It then updates the nodes based on the incoming updated edges.
See https://arxiv.org/abs/1612.00222 for more details.
This implementation adds an option not in https://arxiv.org/abs/1612.00222,
which is to include edge features for which a node is a sender in the
arguments to the node update function.
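  Example usage (a minimal sketch; the concatenation-based update functions
  below are illustrative assumptions rather than the learned functions used in
  the paper)::
    def update_edge_fn(edges, sender_nodes, receiver_nodes):
      # New edge features from the old ones plus the endpoint node features.
      return jnp.concatenate([edges, sender_nodes, receiver_nodes], axis=-1)
    def update_node_fn(nodes, received_edges):
      # New node features from the old ones plus the aggregated incoming edges.
      return jnp.concatenate([nodes, received_edges], axis=-1)
    net = InteractionNetwork(update_edge_fn, update_node_fn)
    updated_graph = net(graph)  # `graph` is a `GraphsTuple`.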
Args:
update_edge_fn: a function mapping a single edge update inputs to a single
edge feature.
update_node_fn: a function mapping a single node update input to a single
node feature.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
node.
include_sent_messages_in_node_update: pass edge features for which a node is
a sender to the node update function.
"""
# An InteractionNetwork is a GraphNetwork without globals features,
# so we implement the InteractionNetwork as a configured GraphNetwork.
# An InteractionNetwork edge function does not have global feature inputs,
# so we filter the passed global argument in the GraphNetwork.
wrapped_update_edge_fn = lambda e, s, r, g: update_edge_fn(e, s, r)
# Similarly, we wrap the update_node_fn to ensure only the expected
# arguments are passed to the Interaction net.
if include_sent_messages_in_node_update:
wrapped_update_node_fn = lambda n, s, r, g: update_node_fn(n, s, r) # pytype: disable=wrong-arg-count
else:
wrapped_update_node_fn = lambda n, s, r, g: update_node_fn(n, r) # pytype: disable=wrong-arg-count
return GraphNetwork(
update_edge_fn=wrapped_update_edge_fn,
update_node_fn=wrapped_update_node_fn,
aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn)
# Signature:
# edge features -> embedded edge features
EmbedEdgeFn = Callable[[EdgeFeatures], EdgeFeatures]
# Signature:
# node features -> embedded node features
EmbedNodeFn = Callable[[NodeFeatures], NodeFeatures]
# Signature:
# globals features -> embedded globals features
EmbedGlobalFn = Callable[[Globals], Globals]
def GraphMapFeatures(embed_edge_fn: Optional[EmbedEdgeFn] = None,
embed_node_fn: Optional[EmbedNodeFn] = None,
embed_global_fn: Optional[EmbedGlobalFn] = None):
"""Returns function which embeds the components of a graph independently.
Args:
embed_edge_fn: function used to embed the edges.
embed_node_fn: function used to embed the nodes.
embed_global_fn: function used to embed the globals.
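  Example usage (a minimal sketch; the doubling lambda is an illustrative
  stand-in for a learned embedding function)::
    embed = GraphMapFeatures(embed_node_fn=lambda n: 2. * n)
    embedded_graph = embed(graph)  # `graph` is a `GraphsTuple`.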
"""
identity = lambda x: x
embed_edges_fn = embed_edge_fn if embed_edge_fn else identity
embed_nodes_fn = embed_node_fn if embed_node_fn else identity
embed_global_fn = embed_global_fn if embed_global_fn else identity
def Embed(graphs_tuple):
return graphs_tuple._replace(
nodes=embed_nodes_fn(graphs_tuple.nodes),
edges=embed_edges_fn(graphs_tuple.edges),
globals=embed_global_fn(graphs_tuple.globals))
return Embed
def RelationNetwork(
update_edge_fn: Callable[[SenderFeatures, ReceiverFeatures], EdgeFeatures],
update_global_fn: Callable[[EdgeFeatures], NodeFeatures],
aggregate_edges_for_globals_fn:
AggregateEdgesToGlobalsFn = utils.segment_sum):
"""Returns a method that applies a Relation Network.
See https://arxiv.org/abs/1706.01427 for more details.
This implementation has one more argument, `aggregate_edges_for_globals_fn`,
  which changes how edge features are aggregated. The paper uses the default,
  `utils.segment_sum`.
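  Example usage (a minimal sketch; the update functions below are illustrative
  assumptions rather than the MLPs used in the paper)::
    def update_edge_fn(sender_nodes, receiver_nodes):
      return jnp.concatenate([sender_nodes, receiver_nodes], axis=-1)
    def update_global_fn(aggregated_edges):
      return jnp.sum(aggregated_edges, axis=-1, keepdims=True)
    net = RelationNetwork(update_edge_fn, update_global_fn)
    updated_graph = net(graph)  # `graph` needs node features.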
Args:
update_edge_fn: function used to update the edges.
update_global_fn: function used to update the globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
"""
return GraphNetwork(
update_edge_fn=lambda e, s, r, g: update_edge_fn(s, r),
update_node_fn=None,
update_global_fn=lambda n, e, g: update_global_fn(e),
attention_logit_fn=None,
aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn)
def DeepSets(
update_node_fn: Callable[[NodeFeatures, Globals], NodeFeatures],
update_global_fn: Callable[[NodeFeatures], Globals],
aggregate_nodes_for_globals_fn:
AggregateNodesToGlobalsFn = utils.segment_sum):
"""Returns a method that applies a DeepSets layer.
Implementation for the model described in https://arxiv.org/abs/1703.06114
(M. Zaheer, S. Kottur, S. Ravanbakhsh, B. Poczos, R. Salakhutdinov, A. Smola).
See also PointNet (https://arxiv.org/abs/1612.00593, C. Qi, H. Su, K. Mo,
L. J. Guibas) for a related model.
This module operates on sets, which can be thought of as graphs without
  edges. The node features are first updated based on their values and the
  globals features, and new globals features are then computed based on the
  updated node features.
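  Example usage (a minimal sketch; the update functions below are illustrative
  assumptions)::
    def update_node_fn(nodes, globals_):
      # Each node sees its own features plus its graph's globals features.
      return jnp.concatenate([nodes, globals_], axis=-1)
    def update_global_fn(aggregated_nodes):
      return jnp.sum(aggregated_nodes, axis=-1, keepdims=True)
    net = DeepSets(update_node_fn, update_global_fn)
    updated_graph = net(graph)  # `graph` needs node and globals features.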
Args:
update_node_fn: function used to update the nodes.
update_global_fn: function used to update the globals.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
"""
# DeepSets can be implemented with a GraphNetwork, with just a node
# update function that takes nodes and globals, and a global update
# function based on the updated node features.
return GraphNetwork(
update_edge_fn=None,
update_node_fn=lambda n, s, r, g: update_node_fn(n, g),
update_global_fn=lambda n, e, g: update_global_fn(n),
aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn)
def GraphNetGAT(
update_edge_fn: GNUpdateEdgeFn,
update_node_fn: GNUpdateNodeFn,
attention_logit_fn: AttentionLogitFn,
attention_reduce_fn: AttentionReduceFn,
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_edges_for_nodes_fn: AggregateEdgesToNodesFn = utils.segment_sum,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils.
segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils.
segment_sum
):
"""Returns a method that applies a GraphNet with attention on edge features.
Args:
update_edge_fn: function used to update the edges.
update_node_fn: function used to update the nodes.
attention_logit_fn: function used to calculate the attention weights.
attention_reduce_fn: function used to apply attention weights to the edge
features.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_edges_for_nodes_fn: function used to aggregate attention-weighted
messages to each node.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate
attention-weighted edges for the globals.
Returns:
A function that applies a GraphNet Graph Attention layer.
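  Example usage (a minimal sketch; the concatenation updates and dot-product
  attention below are illustrative assumptions)::
    net = GraphNetGAT(
        update_edge_fn=lambda e, s, r, g: jnp.concatenate([e, s, r], axis=-1),
        update_node_fn=lambda n, s, r, g: jnp.concatenate([n, r], axis=-1),
        attention_logit_fn=lambda e, s, r, g: jnp.sum(
            s * r, axis=-1, keepdims=True),
        attention_reduce_fn=lambda e, w: e * w)
    updated_graph = net(graph)  # `graph` needs node and edge features.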
"""
if (attention_logit_fn is None) or (attention_reduce_fn is None):
raise ValueError(('`None` value not supported for `attention_logit_fn` or '
'`attention_reduce_fn` in a Graph Attention network.'))
return GraphNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn,
update_global_fn=update_global_fn,
attention_logit_fn=attention_logit_fn,
attention_reduce_fn=attention_reduce_fn,
aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn,
aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn,
aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn)
GATAttentionQueryFn = Callable[[NodeFeatures], NodeFeatures]
GATAttentionLogitFn = Callable[
[SenderFeatures, ReceiverFeatures, EdgeFeatures], EdgeFeatures]
GATNodeUpdateFn = Callable[[NodeFeatures], NodeFeatures]
def GAT(attention_query_fn: GATAttentionQueryFn,
attention_logit_fn: GATAttentionLogitFn,
node_update_fn: Optional[GATNodeUpdateFn] = None):
"""Returns a method that applies a Graph Attention Network layer.
Graph Attention message passing as described in
https://arxiv.org/abs/1710.10903. This model expects node features as a
jnp.array, may use edge features for computing attention weights, and
  ignores global features. It does not support nests.
NOTE: this implementation assumes that the input graph has self edges. To
recover the behavior of the referenced paper, please add self edges.
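  Example usage (a minimal sketch; the random projection `w` and the
  dot-product logit function below are illustrative assumptions, not the exact
  parameterisation of the paper)::
    w = jax.random.normal(jax.random.PRNGKey(0), (graph.nodes.shape[-1], 8))
    def attention_query_fn(nodes):
      return nodes @ w
    def attention_logit_fn(sender_attr, receiver_attr, edges):
      del edges  # Edge features are optional and unused in this sketch.
      return jax.nn.leaky_relu(
          jnp.sum(sender_attr * receiver_attr, axis=-1, keepdims=True))
    net = GAT(attention_query_fn, attention_logit_fn)
    updated_graph = net(graph)  # `graph` should already contain self edges.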
Args:
attention_query_fn: function that generates attention queries
from sender node features.
attention_logit_fn: function that converts attention queries into logits for
softmax attention.
node_update_fn: function that updates the aggregated messages. If None,
will apply leaky relu and concatenate (if using multi-head attention).
Returns:
A function that applies a Graph Attention layer.
"""
# pylint: disable=g-long-lambda
if node_update_fn is None:
# By default, apply the leaky relu and then concatenate the heads on the
# feature axis.
node_update_fn = lambda x: jnp.reshape(
jax.nn.leaky_relu(x), (x.shape[0], -1))
def _ApplyGAT(graph):
"""Applies a Graph Attention layer."""
nodes, edges, receivers, senders, _, _, _ = graph
# Equivalent to the sum of n_node, but statically known.
try:
sum_n_node = nodes.shape[0]
except IndexError:
raise IndexError('GAT requires node features') # pylint: disable=raise-missing-from
# First pass nodes through the node updater.
nodes = attention_query_fn(nodes)
# pylint: disable=g-long-lambda
# We compute the softmax logits using a function that takes the
# embedded sender and receiver attributes.
sent_attributes = nodes[senders]
received_attributes = nodes[receivers]
softmax_logits = attention_logit_fn(
sent_attributes, received_attributes, edges)
    # Compute the attention softmax weights over each node's incoming edges.
weights = utils.segment_softmax(softmax_logits, segment_ids=receivers,
num_segments=sum_n_node)
    # Apply the attention weights to the sender features.
messages = sent_attributes * weights
# Aggregate messages to nodes.
nodes = utils.segment_sum(messages, receivers, num_segments=sum_n_node)
# Apply an update function to the aggregated messages.
nodes = node_update_fn(nodes)
return graph._replace(nodes=nodes)
# pylint: enable=g-long-lambda
return _ApplyGAT
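# A usage sketch for the GAT layer above (illustrative only, not part of the
# library). `_example_gat` is a hypothetical name, the identity query matrix is
# a placeholder for learned parameters, and the input graph is assumed to
# already contain self edges as noted in the docstring.
def _example_gat(graph):
  """Applies a single-head GAT layer with dot-product attention logits."""
  feature_dim = graph.nodes.shape[-1]
  query_matrix = jnp.eye(feature_dim)  # Placeholder for a learned projection.
  gat_layer = GAT(
      attention_query_fn=lambda nodes: nodes @ query_matrix,
      # One logit per edge from the similarity of the sender and receiver
      # queries; edge features are ignored in this sketch.
      attention_logit_fn=lambda sent, received, edges: jax.nn.leaky_relu(
          jnp.sum(sent * received, axis=-1, keepdims=True)))
  return gat_layer(graph)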
def GraphConvolution(
update_node_fn: Callable[[NodeFeatures], NodeFeatures],
aggregate_nodes_fn: AggregateEdgesToNodesFn = utils.segment_sum,
add_self_edges: bool = False,
symmetric_normalization: bool = True):
"""Returns a method that applies a Graph Convolution layer.
  Graph Convolutional layer as in https://arxiv.org/abs/1609.02907.
NOTE: This implementation does not add an activation after aggregation.
If you are stacking layers, you may want to add an activation between
each layer.
Args:
update_node_fn: function used to update the nodes. In the paper a single
layer MLP is used.
    aggregate_nodes_fn: function used to aggregate the sender nodes.
add_self_edges: whether to add self edges to nodes in the graph as in the
paper definition of GCN. Defaults to False.
symmetric_normalization: whether to use symmetric normalization. Defaults
      to True. Note that to replicate the formula of the linked paper, the
      adjacency matrix must be symmetric. If the adjacency matrix is not
      symmetric, the data is pre-normalised by the sender degree matrix and
      post-normalised by the receiver degree matrix.
Returns:
A method that applies a Graph Convolution layer.
"""
def _ApplyGCN(graph):
"""Applies a Graph Convolution layer."""
nodes, _, receivers, senders, _, _, _ = graph
# First pass nodes through the node updater.
nodes = update_node_fn(nodes)
# Equivalent to jnp.sum(n_node), but jittable
total_num_nodes = tree.tree_leaves(nodes)[0].shape[0]
if add_self_edges:
# We add self edges to the senders and receivers so that each node
# includes itself in aggregation.
# In principle, a `GraphsTuple` should partition by n_edge, but in
# this case it is not required since a GCN is agnostic to whether
# the `GraphsTuple` is a batch of graphs or a single large graph.
conv_receivers = jnp.concatenate((receivers, jnp.arange(total_num_nodes)),
axis=0)
conv_senders = jnp.concatenate((senders, jnp.arange(total_num_nodes)),
axis=0)
else:
conv_senders = senders
conv_receivers = receivers
# pylint: disable=g-long-lambda
if symmetric_normalization:
# Calculate the normalization values.
count_edges = lambda x: utils.segment_sum(
jnp.ones_like(conv_senders), x, total_num_nodes)
sender_degree = count_edges(conv_senders)
receiver_degree = count_edges(conv_receivers)
# Pre normalize by sqrt sender degree.
# Avoid dividing by 0 by taking maximum of (degree, 1).
nodes = tree.tree_map(
lambda x: x * jax.lax.rsqrt(jnp.maximum(sender_degree, 1.0))[:, None],
nodes,
)
# Aggregate the pre normalized nodes.
nodes = tree.tree_map(
lambda x: aggregate_nodes_fn(x[conv_senders], conv_receivers,
total_num_nodes), nodes)
# Post normalize by sqrt receiver degree.
# Avoid dividing by 0 by taking maximum of (degree, 1).
nodes = tree.tree_map(
lambda x:
(x * jax.lax.rsqrt(jnp.maximum(receiver_degree, 1.0))[:, None]),
nodes,
)
else:
nodes = tree.tree_map(
lambda x: aggregate_nodes_fn(x[conv_senders], conv_receivers,
total_num_nodes), nodes)
# pylint: enable=g-long-lambda
return graph._replace(nodes=nodes)
return _ApplyGCN
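# A usage sketch for GraphConvolution (illustrative only, not part of the
# library). With self edges and symmetric normalization this approximates the
# propagation rule D^-1/2 (A + I) D^-1/2 X W from the referenced paper;
# `_example_gcn` is a hypothetical name and the identity weight matrix is a
# placeholder for a learned linear layer.
def _example_gcn(graph):
  """Applies one symmetrically normalised GCN layer followed by a ReLU."""
  feature_dim = graph.nodes.shape[-1]
  weight_matrix = jnp.eye(feature_dim)  # Placeholder for learned parameters.
  gcn_layer = GraphConvolution(
      update_node_fn=lambda nodes: nodes @ weight_matrix,
      add_self_edges=True,
      symmetric_normalization=True)
  updated_graph = gcn_layer(graph)
  # The layer applies no activation internally; add one when stacking layers.
  return updated_graph._replace(nodes=jax.nn.relu(updated_graph.nodes))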
| jraph-master | jraph/_src/models.py |